From 47b4cf766e4524d635b8f539123a1d52c7bfc490 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 21 Mar 2024 22:15:52 +0100 Subject: [PATCH 001/482] CI: Add FreeBSD 13.3 and 14.0 for devel, move FreeBSD 13.2 to stable-2.16 (#8122) * Add FreeBSD 13.3 and 14.0 for devel, move FreeBSD 13.2 to stable-2.16. * Skip some targets. * Skip pkgng jail tests (again :( ). --- .azure-pipelines/azure-pipelines.yml | 10 ++++++---- tests/integration/targets/filter_jc/aliases | 2 ++ tests/integration/targets/iso_extract/aliases | 2 ++ tests/integration/targets/pkgng/tasks/freebsd.yml | 9 ++++++++- 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 815e7d5556..3d5995e149 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -191,8 +191,10 @@ stages: test: macos/14.3 - name: RHEL 9.3 test: rhel/9.3 - - name: FreeBSD 13.2 - test: freebsd/13.2 + - name: FreeBSD 13.3 + test: freebsd/13.3 + - name: FreeBSD 14.0 + test: freebsd/14.0 groups: - 1 - 2 @@ -211,8 +213,8 @@ stages: test: rhel/9.2 - name: RHEL 8.8 test: rhel/8.8 - #- name: FreeBSD 13.2 - # test: freebsd/13.2 + - name: FreeBSD 13.2 + test: freebsd/13.2 groups: - 1 - 2 diff --git a/tests/integration/targets/filter_jc/aliases b/tests/integration/targets/filter_jc/aliases index 96f3239964..4e11515666 100644 --- a/tests/integration/targets/filter_jc/aliases +++ b/tests/integration/targets/filter_jc/aliases @@ -4,3 +4,5 @@ azp/posix/2 skip/python2.7 # jc only supports python3.x +skip/freebsd13.3 # FIXME - ruyaml compilation fails +skip/freebsd14.0 # FIXME - ruyaml compilation fails diff --git a/tests/integration/targets/iso_extract/aliases b/tests/integration/targets/iso_extract/aliases index c43162366d..5ddca1ecbb 100644 --- a/tests/integration/targets/iso_extract/aliases +++ b/tests/integration/targets/iso_extract/aliases @@ -13,3 +13,5 @@ skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME 
skip/freebsd12.4 # FIXME skip/freebsd13.2 # FIXME +skip/freebsd13.3 # FIXME +skip/freebsd14.0 # FIXME diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml index 0c8001899f..9d4ecf8bb2 100644 --- a/tests/integration/targets/pkgng/tasks/freebsd.yml +++ b/tests/integration/targets/pkgng/tasks/freebsd.yml @@ -515,11 +515,18 @@ # NOTE: FreeBSD 13.2 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 13.3 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # + # NOTE: FreeBSD 14.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # # See also # https://github.com/ansible-collections/community.general/issues/5795 when: >- (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<')) - or ansible_distribution_version is version('13.3', '>=') + or (ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14.0', '<')) + or ansible_distribution_version is version('14.1', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml From da048aa12e7628fc35c700321a3e109514ad5825 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 22 Mar 2024 12:48:32 +0100 Subject: [PATCH 002/482] CI: Move Alpine 3.18 docker to stable-2.16, add Alpine 3.19 docker, bump Alpine VM to 3.19 (#8127) Move Alpine 3.18 docker to stable-2.16, add Alpine 3.19 docker, bump Alpine VM to 3.19. 
--- .azure-pipelines/azure-pipelines.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 3d5995e149..163d71b628 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -171,8 +171,8 @@ stages: parameters: testFormat: devel/{0} targets: - - name: Alpine 3.18 - test: alpine/3.18 + - name: Alpine 3.19 + test: alpine/3.19 # - name: Fedora 39 # test: fedora/39 - name: Ubuntu 22.04 @@ -275,8 +275,8 @@ stages: test: ubuntu2004 - name: Ubuntu 22.04 test: ubuntu2204 - - name: Alpine 3 - test: alpine3 + - name: Alpine 3.19 + test: alpine319 groups: - 1 - 2 @@ -293,6 +293,8 @@ stages: test: fedora38 - name: openSUSE 15 test: opensuse15 + - name: Alpine 3 + test: alpine3 groups: - 1 - 2 From fb67df3051930fdee5f1d8320fc3ceb3836e5018 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 23 Mar 2024 12:28:41 +0100 Subject: [PATCH 003/482] Ignore pylint warnings for construct that does not work with Python 2 (#8130) * Ignore pylint warnings for construct that does not work with Python 2. * Revert "Ignore pylint warnings for construct that does not work with Python 2." This reverts commit 51d559cc94147e1ca076ed74491f9e9d1d2def04. * Different approach: use ignore.txt since otherwise ansible-core 2.14 tests fail. 
--- tests/sanity/ignore-2.17.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt index 397c6d9865..d75aaeac27 100644 --- a/tests/sanity/ignore-2.17.txt +++ b/tests/sanity/ignore-2.17.txt @@ -12,4 +12,6 @@ plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes From 4947786d3694bee0498c40b54540f67170518b70 Mon Sep 17 00:00:00 2001 From: Gianluca Salvo Date: Sun, 24 Mar 2024 18:01:34 +0100 Subject: [PATCH 004/482] Adds group_by_hostgroups parameter to Icinga2 inventory (#7998) * (lots of commit messages) --------- Co-authored-by: Gianluca Salvo Co-authored-by: Felix Fontein --- ...nventory-group_by_hostgroups-parameter.yml | 2 ++ plugins/inventory/icinga2.py | 19 ++++++++++++++----- tests/unit/plugins/inventory/test_icinga2.py | 3 +++ 3 files changed, 19 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml diff --git a/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml b/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml new file mode 100644 index 0000000000..1170a108fd --- /dev/null +++ b/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in order to make grouping by Icinga2 hostgroups optional 
(https://github.com/ansible-collections/community.general/pull/7998). \ No newline at end of file diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index a418707332..6a6bafdb42 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -63,6 +63,12 @@ DOCUMENTATION = ''' default: address choices: ['name', 'display_name', 'address'] version_added: 4.2.0 + group_by_hostgroups: + description: + - Uses Icinga2 hostgroups as groups. + type: boolean + default: true + version_added: 8.4.0 ''' EXAMPLES = r''' @@ -114,6 +120,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.ssl_verify = None self.host_filter = None self.inventory_attr = None + self.group_by_hostgroups = None self.cache_key = None self.use_cache = None @@ -248,12 +255,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): host_attrs['state'] = 'on' else: host_attrs['state'] = 'off' - host_groups = host_attrs.get('groups') self.inventory.add_host(host_name) - for group in host_groups: - if group not in self.inventory.groups.keys(): - self.inventory.add_group(group) - self.inventory.add_child(group, host_name) + if self.group_by_hostgroups: + host_groups = host_attrs.get('groups') + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_name) # If the address attribute is populated, override ansible_host with the value if host_attrs.get('address') != '': self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) @@ -283,6 +291,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.ssl_verify = self.get_option('validate_certs') self.host_filter = self.get_option('host_filter') self.inventory_attr = self.get_option('inventory_attr') + self.group_by_hostgroups = self.get_option('group_by_hostgroups') if self.templar.is_template(self.icinga2_url): self.icinga2_url = self.templar.template(variable=self.icinga2_url, 
disable_lookups=False) diff --git a/tests/unit/plugins/inventory/test_icinga2.py b/tests/unit/plugins/inventory/test_icinga2.py index e3928b0dbe..859f29d3b0 100644 --- a/tests/unit/plugins/inventory/test_icinga2.py +++ b/tests/unit/plugins/inventory/test_icinga2.py @@ -86,6 +86,8 @@ def get_option(option): return {} elif option == 'strict': return False + elif option == 'group_by_hostgroups': + return True else: return None @@ -96,6 +98,7 @@ def test_populate(inventory, mocker): inventory.icinga2_password = 'password' inventory.icinga2_url = 'https://localhost:5665' + '/v1' inventory.inventory_attr = "address" + inventory.group_by_hostgroups = True # bypass authentication and API fetch calls inventory._check_api = mocker.MagicMock(side_effect=check_api) From 4363f8764b44a6bf62f671954642f60f9e76186c Mon Sep 17 00:00:00 2001 From: Todd Lewis Date: Sun, 24 Mar 2024 13:02:13 -0400 Subject: [PATCH 005/482] ini_file - support optional spaces around section names (#8075) * ini_file - support optional spaces between section names and their surrounding brackets Some ini files have spaces between some of their section names and the brackets that enclose them. This is documented in the 'openssl.cnf(5)' man page. In order to manage files such as /etc/ssl/openssl.cnf with ini_file before now, one would have to include spaces in the section name like this: section: ' crypto_policy ' option: Options value: UnsafeLegacyRenegotiation This change implements matching section headers with such optional spaces. Existing tasks using the workaround above will continue to work, even in cases where spaces in section headers are subsequently removed. 
* readability improvement in the test content expressions --------- Co-authored-by: Todd Lewis --- ...5-optional-space-around-section-names.yaml | 2 + plugins/modules/ini_file.py | 4 +- .../targets/ini_file/tasks/main.yml | 3 + .../tasks/tests/07-section_name_spaces.yml | 103 ++++++++++++++++++ 4 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8075-optional-space-around-section-names.yaml create mode 100644 tests/integration/targets/ini_file/tasks/tests/07-section_name_spaces.yml diff --git a/changelogs/fragments/8075-optional-space-around-section-names.yaml b/changelogs/fragments/8075-optional-space-around-section-names.yaml new file mode 100644 index 0000000000..2e44555f08 --- /dev/null +++ b/changelogs/fragments/8075-optional-space-around-section-names.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "ini_file - support optional spaces between section names and their surrounding brackets (https://github.com/ansible-collections/community.general/pull/8075)." 
diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index 764c73cd95..ec71a94731 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -304,9 +304,11 @@ def do_ini(module, filename, section=None, option=None, values=None, before = after = [] section_lines = [] + section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip()))) + for index, line in enumerate(ini_lines): # find start and end of section - if line.startswith(u'[%s]' % section): + if section_pattern.match(line): within_section = True section_start = index elif line.startswith(u'['): diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index dbd922a9c6..0ed3c28172 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -47,3 +47,6 @@ - name: include tasks to test modify_inactive_option include_tasks: tests/06-modify_inactive_option.yml + + - name: include tasks to test optional spaces in section headings + include_tasks: tests/07-section_name_spaces.yml diff --git a/tests/integration/targets/ini_file/tasks/tests/07-section_name_spaces.yml b/tests/integration/targets/ini_file/tasks/tests/07-section_name_spaces.yml new file mode 100644 index 0000000000..6cdcfef40f --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/07-section_name_spaces.yml @@ -0,0 +1,103 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +## testing support for optional spaces between brackets and section names + +- name: Test-section_name_spaces 1 (does legacy workaround still work) - create test file + ansible.builtin.copy: # noqa risky-file-permissions + dest: "{{ output_file }}" + content: | + [ foo ] + ; bar=baz + +- name: Test-section_name_spaces 1 - update with optional spaces 
specified + community.general.ini_file: # noqa risky-file-permissions + path: "{{ output_file }}" + section: ' foo ' + option: bar + value: frelt + register: result + +- name: Test-section_name_spaces 1 - read content from output file + ansible.builtin.slurp: + src: "{{ output_file }}" + register: output_content + +- name: Test-section_name_spaces 1 - verify results + vars: + actual_content: "{{ output_content.content | b64decode }}" + expected_content: | + [ foo ] + bar = frelt + ansible.builtin.assert: + that: + - actual_content == expected_content + - result is changed + - result.msg == 'option changed' + + +- name: Test-section_name_spaces 2 (optional spaces omitted) - create test file + ansible.builtin.copy: # noqa risky-file-permissions + dest: "{{ output_file }}" + content: | + [ foo ] + bar=baz" + +- name: Test-section_name_spaces 2 - update without optional spaces + community.general.ini_file: # noqa risky-file-permissions + path: "{{ output_file }}" + section: foo + option: bar + value: frelt + ignore_spaces: true + register: result + +- name: Test-section_name_spaces 2 - read content from output file + ansible.builtin.slurp: + src: "{{ output_file }}" + register: output_content + +- name: Test-section_name_spaces 2 - verify results + vars: + actual_content: "{{ output_content.content | b64decode }}" + expected_content: "[ foo ]\nbar = frelt\n" + ansible.builtin.assert: + that: + - actual_content == expected_content + - result is changed + - result.msg == 'option changed' + + +- name: Test-section_name_spaces 3 (legacy workaround when not required) - create test file + ansible.builtin.copy: # noqa risky-file-permissions + dest: "{{ output_file }}" + content: | + [foo] + ; bar=baz + +- name: Test-section_name_spaces 3 - update with optional spaces specified + community.general.ini_file: # noqa risky-file-permissions + path: "{{ output_file }}" + section: ' foo ' + option: bar + value: frelt + register: result + +- name: Test-section_name_spaces 3 - read 
content from output file + ansible.builtin.slurp: + src: "{{ output_file }}" + register: output_content + +- name: Test-section_name_spaces 3 - verify results + vars: + actual_content: "{{ output_content.content | b64decode }}" + expected_content: | + [foo] + bar = frelt + ansible.builtin.assert: + that: + - actual_content == expected_content + - result is changed + - result.msg == 'option changed' From 23396e62dc75bb4a9a16f0d1b29d85fb66d93725 Mon Sep 17 00:00:00 2001 From: Maxopoly Date: Sun, 24 Mar 2024 18:02:48 +0100 Subject: [PATCH 006/482] Fix check mode in iptables_state for incomplete iptables-save files along with integration tests (#8029) * Implement integration test to reproduce #7463 * Make new iptables_state checks async * Add missing commit to iptable_state integration test * Remove async when using checkmode in iptables_state integration tests * Do per table comparison in check mode for iptables_state * Calculate changes of iptables state per table based on result * Output target iptables state in checkmode * Refactor calculation of invidual table states in iptables_state * Add missing return for table calculation * Add missing arg to regex check * Remove leftover debug output for target iptable state * Parse per table state from raw state string * Join restored state for extration of table specific rules * Switch arguments for joining restored iptable state * Output final ip table state * Compare content of tables * Complete iptables partial tables test cases * Correct order of test iptables data * Update docu for iptables tables_after * Add changelog fragment * Appease the linting gods for iptables_state * Adjust spelling and remove tables_after from return values --- ...8029-iptables-state-restore-check-mode.yml | 2 + plugins/modules/iptables_state.py | 48 +++++++++----- .../targets/iptables_state/tasks/main.yml | 6 ++ .../tasks/tests/02-partial-restore.yml | 66 +++++++++++++++++++ 4 files changed, 104 insertions(+), 18 deletions(-) create mode 
100644 changelogs/fragments/8029-iptables-state-restore-check-mode.yml create mode 100644 tests/integration/targets/iptables_state/tasks/tests/02-partial-restore.yml diff --git a/changelogs/fragments/8029-iptables-state-restore-check-mode.yml b/changelogs/fragments/8029-iptables-state-restore-check-mode.yml new file mode 100644 index 0000000000..900ea50988 --- /dev/null +++ b/changelogs/fragments/8029-iptables-state-restore-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - iptables_state - fix idempotency issues when restoring incomplete iptables dumps (https://github.com/ansible-collections/community.general/issues/8029). diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py index 79c0e26c48..b0cc3bd3f6 100644 --- a/plugins/modules/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -207,7 +207,9 @@ saved: "# Completed" ] tables: - description: The iptables we have interest for when module starts. + description: + - The iptables on the system before the module has run, separated by table. + - If the option O(table) is used, only this table is included. type: dict contains: table: @@ -346,20 +348,27 @@ def filter_and_format_state(string): return lines -def per_table_state(command, state): +def parse_per_table_state(all_states_dump): ''' Convert raw iptables-save output into usable datastructure, for reliable comparisons between initial and final states. 
''' + lines = filter_and_format_state(all_states_dump) tables = dict() - for t in TABLES: - COMMAND = list(command) - if '*%s' % t in state.splitlines(): - COMMAND.extend(['--table', t]) - dummy, out, dummy = module.run_command(COMMAND, check_rc=True) - out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out) - out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out) - tables[t] = [tt for tt in out.splitlines() if tt != ''] + current_table = '' + current_list = list() + for line in lines: + if re.match(r'^[*](filter|mangle|nat|raw|security)$', line): + current_table = line[1:] + continue + if line == 'COMMIT': + tables[current_table] = current_list + current_table = '' + current_list = list() + continue + if line.startswith('# '): + continue + current_list.append(line) return tables @@ -486,7 +495,7 @@ def main(): # Depending on the value of 'table', initref_state may differ from # initial_state. (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) - tables_before = per_table_state(SAVECOMMAND, stdout) + tables_before = parse_per_table_state(stdout) initref_state = filter_and_format_state(stdout) if state == 'saved': @@ -583,14 +592,17 @@ def main(): (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) restored_state = filter_and_format_state(stdout) - + tables_after = parse_per_table_state('\n'.join(restored_state)) if restored_state not in (initref_state, initial_state): - if module.check_mode: - changed = True - else: - tables_after = per_table_state(SAVECOMMAND, stdout) - if tables_after != tables_before: + for table_name, table_content in tables_after.items(): + if table_name not in tables_before: + # Would initialize a table, which doesn't exist yet changed = True + break + if tables_before[table_name] != table_content: + # Content of some table changes + changed = True + break if _back is None or module.check_mode: module.exit_json( @@ -633,7 +645,7 @@ def main(): os.remove(b_back) (rc, stdout, stderr) = 
module.run_command(SAVECOMMAND, check_rc=True) - tables_rollback = per_table_state(SAVECOMMAND, stdout) + tables_rollback = parse_per_table_state(stdout) msg = ( "Failed to confirm state restored from %s after %ss. " diff --git a/tests/integration/targets/iptables_state/tasks/main.yml b/tests/integration/targets/iptables_state/tasks/main.yml index a74e74df48..d550070677 100644 --- a/tests/integration/targets/iptables_state/tasks/main.yml +++ b/tests/integration/targets/iptables_state/tasks/main.yml @@ -29,6 +29,12 @@ when: - xtables_lock is undefined + - name: include tasks to test partial restore files + include_tasks: tests/02-partial-restore.yml + when: + - xtables_lock is undefined + + - name: include tasks to test rollbacks include_tasks: tests/10-rollback.yml when: diff --git a/tests/integration/targets/iptables_state/tasks/tests/02-partial-restore.yml b/tests/integration/targets/iptables_state/tasks/tests/02-partial-restore.yml new file mode 100644 index 0000000000..6da4814af0 --- /dev/null +++ b/tests/integration/targets/iptables_state/tasks/tests/02-partial-restore.yml @@ -0,0 +1,66 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: "Create initial rule set to use" + copy: + dest: "{{ iptables_tests }}" + content: | + *filter + :INPUT ACCEPT [0:0] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [0:0] + -A INPUT -m state --state NEW,ESTABLISHED -j ACCEPT + COMMIT + *nat + :PREROUTING ACCEPT [151:17304] + :INPUT ACCEPT [151:17304] + :OUTPUT ACCEPT [151:17304] + :POSTROUTING ACCEPT [151:17304] + -A POSTROUTING -o eth0 -j MASQUERADE + COMMIT + +- name: "Restore initial state" + iptables_state: + path: "{{ iptables_tests }}" + state: restored + async: "{{ ansible_timeout }}" + poll: 0 + +- name: "Create partial ruleset only specifying input" + copy: + dest: "{{ iptables_tests }}" + content: | + *filter + 
:INPUT ACCEPT [0:0] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [0:0] + -A INPUT -m state --state NEW,ESTABLISHED -j ACCEPT + COMMIT + +- name: "Check restoring partial state" + iptables_state: + path: "{{ iptables_tests }}" + state: restored + check_mode: true + register: iptables_state + + +- name: "assert that no changes are detected in check mode" + assert: + that: + - iptables_state is not changed + +- name: "Restore partial state" + iptables_state: + path: "{{ iptables_tests }}" + state: restored + register: iptables_state + async: "{{ ansible_timeout }}" + poll: 0 + +- name: "assert that no changes are made" + assert: + that: + - iptables_state is not changed \ No newline at end of file From 9f5193e40b7bb05171ac3cde9adad9de9008c246 Mon Sep 17 00:00:00 2001 From: aBUDmdBQ <135135848+aBUDmdBQ@users.noreply.github.com> Date: Sun, 24 Mar 2024 18:03:55 +0100 Subject: [PATCH 007/482] ipa_sudorule, ipa_hbacrule: change ipaenabledflag type to bool (#7880) * ipa_sudorule, ipa_hbacrule: change ipaenabledflag type to bool freeipa changed the type to bool with commit https://pagure.io/freeipa/c/6c5f2bcb301187f9844985ffe309c7d2262e16f3 * add changelog-fragment * ipa_sudorule, ipa_hbacrule: set ipaenabledflag according to version * ipa_sudorule, ipa_hbacrule: change version for backport it also got backported (https://pagure.io/freeipa/c/faeb656c77adf27a49ccaceb57fc1ba44e11cc1d) * ipa_sudorule, ipa_hbacrule: swap assigned values * Update changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml Co-authored-by: Felix Fontein --------- Co-authored-by: aBUDmdBQ <> Co-authored-by: Felix Fontein --- .../7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml | 3 +++ plugins/modules/ipa_hbacrule.py | 12 ++++++++++-- plugins/modules/ipa_sudorule.py | 12 ++++++++++-- 3 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml diff --git 
a/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml b/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml new file mode 100644 index 0000000000..cb2caa3780 --- /dev/null +++ b/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml @@ -0,0 +1,3 @@ +bugfixes: + - ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). + - ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py index b7633262b6..77a4d0d487 100644 --- a/plugins/modules/ipa_hbacrule.py +++ b/plugins/modules/ipa_hbacrule.py @@ -161,6 +161,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class HBACRuleIPAClient(IPAClient): @@ -231,10 +232,17 @@ def ensure(module, client): name = module.params['cn'] state = module.params['state'] + ipa_version = client.get_ipa_version() if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True else: - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False host = module.params['host'] hostcategory = module.params['hostcategory'] diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py index 4f00e88059..223f6b6de7 100644 --- a/plugins/modules/ipa_sudorule.py +++ 
b/plugins/modules/ipa_sudorule.py @@ -202,6 +202,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class SudoRuleIPAClient(IPAClient): @@ -334,10 +335,17 @@ def ensure(module, client): runasgroupcategory = module.params['runasgroupcategory'] runasextusers = module.params['runasextusers'] + ipa_version = client.get_ipa_version() if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True else: - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False sudoopt = module.params['sudoopt'] user = module.params['user'] From f5fa16c881b94f87e19b4d75b300db3b6f72cd20 Mon Sep 17 00:00:00 2001 From: Emilien Escalle Date: Sun, 24 Mar 2024 18:04:36 +0100 Subject: [PATCH 008/482] feat(lookup/bitwarden): add support for fetching all items from a collection (#8013) Signed-off-by: Emilien Escalle --- ...3-bitwarden-full-collection-item-list.yaml | 2 ++ plugins/lookup/bitwarden.py | 36 ++++++++++++++----- tests/unit/plugins/lookup/test_bitwarden.py | 27 +++++++++++--- 3 files changed, 51 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml diff --git a/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml b/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml new file mode 100644 index 0000000000..7337233aea --- /dev/null +++ b/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "bitwarden lookup plugin - allows to fetch all records of a given collection ID, by 
allowing to pass an empty value for ``search_value`` when ``collection_id`` is provided (https://github.com/ansible-collections/community.general/pull/8013)." diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 727a2bac4d..2cb2d19a18 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -29,6 +29,7 @@ DOCUMENTATION = """ - Field to retrieve, for example V(name) or V(id). - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. + - When O(collection_id) is set, this field can be undefined to retrieve the whole collection records. type: str default: name version_added: 5.7.0 @@ -75,6 +76,11 @@ EXAMPLES = """ ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} """ RETURN = """ @@ -136,32 +142,39 @@ class Bitwarden(object): raise BitwardenException(err) return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') - def _get_matches(self, search_value, search_field, collection_id): + def _get_matches(self, search_value, search_field, collection_id=None): """Return matching records whose search_field is equal to key. 
""" # Prepare set of params for Bitwarden CLI - if search_field == 'id': - params = ['get', 'item', search_value] + if search_value: + if search_field == 'id': + params = ['get', 'item', search_value] + else: + params = ['list', 'items', '--search', search_value] + if collection_id: + params.extend(['--collectionid', collection_id]) else: - params = ['list', 'items', '--search', search_value] + if not collection_id: + raise AnsibleError("search_value is required if collection_id is not set.") - if collection_id: - params.extend(['--collectionid', collection_id]) + params = ['list', 'items', '--collectionid', collection_id] out, err = self._run(params) # This includes things that matched in different fields. initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] - if search_field == 'id': + + if search_field == 'id' or not search_value: if initial_matches is None: initial_matches = [] else: initial_matches = [initial_matches] + # Filter to only include results from the right field. return [item for item in initial_matches if item[search_field] == search_value] - def get_field(self, field, search_value, search_field="name", collection_id=None): + def get_field(self, field, search_value=None, search_field="name", collection_id=None): """Return a list of the specified field for records whose search_field match search_value and filtered by collection if collection has been provided. 
@@ -188,14 +201,16 @@ class Bitwarden(object): if field in match: field_matches.append(match[field]) continue + if matches and not field_matches: raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) + return field_matches class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): + def run(self, terms=None, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) field = self.get_option('field') search_field = self.get_option('search') @@ -205,6 +220,9 @@ class LookupModule(LookupBase): if not _bitwarden.unlocked: raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") + if not terms: + return [_bitwarden.get_field(field, None, search_field, collection_id)] + return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms] diff --git a/tests/unit/plugins/lookup/test_bitwarden.py b/tests/unit/plugins/lookup/test_bitwarden.py index 58f46f2cc7..9270dd44e1 100644 --- a/tests/unit/plugins/lookup/test_bitwarden.py +++ b/tests/unit/plugins/lookup/test_bitwarden.py @@ -14,10 +14,13 @@ from ansible.module_utils import six from ansible.plugins.loader import lookup_loader from ansible_collections.community.general.plugins.lookup.bitwarden import Bitwarden +MOCK_COLLECTION_ID = "3b12a9da-7c49-40b8-ad33-aede017a7ead" MOCK_RECORDS = [ { - "collectionIds": [], + "collectionIds": [ + MOCK_COLLECTION_ID + ], "deletedDate": None, "favorite": False, "fields": [ @@ -65,7 +68,9 @@ MOCK_RECORDS = [ "type": 1 }, { - "collectionIds": [], + "collectionIds": [ + MOCK_COLLECTION_ID + ], "deletedDate": None, "favorite": False, "folderId": None, @@ -85,7 +90,9 @@ MOCK_RECORDS = [ "type": 1 }, { - "collectionIds": [], + "collectionIds": [ + MOCK_COLLECTION_ID + ], "deletedDate": None, "favorite": False, "folderId": None, @@ -111,7 +118,10 @@ class MockBitwarden(Bitwarden): unlocked = True - def _get_matches(self, search_value, search_field="name", 
collection_id=None): + def _get_matches(self, search_value=None, search_field="name", collection_id=None): + if not search_value and collection_id: + return list(filter(lambda record: collection_id in record['collectionIds'], MOCK_RECORDS)) + return list(filter(lambda record: record[search_field] == search_value, MOCK_RECORDS)) @@ -156,9 +166,11 @@ class TestLookupModule(unittest.TestCase): def test_bitwarden_plugin_unlocked(self): record = MOCK_RECORDS[0] record_name = record['name'] - with self.assertRaises(AnsibleError): + with self.assertRaises(AnsibleError) as raised_error: self.lookup.run([record_name], field='password') + self.assertEqual("Bitwarden Vault locked. Run 'bw unlock'.", str(raised_error.exception)) + def test_bitwarden_plugin_without_session_option(self): mock_bitwarden = MockBitwarden() with patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", mock_bitwarden): @@ -178,3 +190,8 @@ class TestLookupModule(unittest.TestCase): self.lookup.run([record_name], field=None, bw_session=session) self.assertEqual(mock_bitwarden.session, session) + + @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + def test_bitwarden_plugin_full_collection(self): + # Try to retrieve the full records of the given collection. + self.assertEqual(MOCK_RECORDS, self.lookup.run(None, collection_id=MOCK_COLLECTION_ID)[0]) From a4b32d7b9c42cf79a8f273b89c2bfdb73c657457 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Sun, 24 Mar 2024 18:05:04 +0100 Subject: [PATCH 009/482] Fix bond-slave honoring MTU (#8118) The bond-slave type should honor the request MTU value. 
--- changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml | 2 ++ plugins/modules/nmcli.py | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml diff --git a/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml b/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml new file mode 100644 index 0000000000..47f8af9ac3 --- /dev/null +++ b/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118). diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 4ca4198e35..9360ce37d3 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -1952,6 +1952,7 @@ class Nmcli(object): def mtu_conn_type(self): return self.type in ( 'bond', + 'bond-slave', 'dummy', 'ethernet', 'infiniband', From 795a855d0e0b319f270f869a0b1793acaba43ea3 Mon Sep 17 00:00:00 2001 From: Michael Cicogna <44257895+miccico@users.noreply.github.com> Date: Sun, 24 Mar 2024 22:22:10 +0100 Subject: [PATCH 010/482] Added startup configuration option (#8038) * Added startup configuration option Added the option to configure startup behavior of lxc containers. Works well in conjunction with onboot and allows to set startup order, startup delay and shutdown delay * Removed trailing whitespaces in documentation * added changelog fragment * Updated Documentation to suggested wording Co-authored-by: Felix Fontein * Improve documentation. * Fix changelog fragment. 
--------- Co-authored-by: Felix Fontein --- changelogs/fragments/8038-proxmox-startup.yml | 2 ++ plugins/modules/proxmox.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/8038-proxmox-startup.yml diff --git a/changelogs/fragments/8038-proxmox-startup.yml b/changelogs/fragments/8038-proxmox-startup.yml new file mode 100644 index 0000000000..f8afbc0c4e --- /dev/null +++ b/changelogs/fragments/8038-proxmox-startup.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox - adds ``startup`` parameters to configure startup order, startup delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038). diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 0990169ca5..47f3faa4f2 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -81,6 +81,15 @@ options: type: list elements: str version_added: 2.0.0 + startup: + description: + - Specifies the startup order of the container. + - Use C(order=#) where C(#) is a non-negative number to define the general startup order. Shutdown in done with reverse ordering. + - Use C(up=#) where C(#) is in seconds, to specify a delay to wait before the next VM is started. + - Use C(down=#) where C(#) is in seconds, to specify a delay to wait before the next VM is stopped. + type: list + elements: str + version_added: 8.5.0 mounts: description: - specifies additional mounts (separate disks) for the container. 
As a hash/dictionary defining mount points @@ -761,6 +770,7 @@ def main(): ]), onboot=dict(type='bool'), features=dict(type='list', elements='str'), + startup=dict(type='list', elements='str'), storage=dict(default='local'), cpuunits=dict(type='int'), nameserver=dict(), @@ -859,6 +869,9 @@ def main(): features=",".join(module.params["features"]) if module.params["features"] is not None else None, + startup=",".join(module.params["startup"]) + if module.params["startup"] is not None + else None, description=module.params["description"], hookscript=module.params["hookscript"], timezone=module.params["timezone"], @@ -912,6 +925,7 @@ def main(): force=ansible_to_proxmox_bool(module.params['force']), pubkey=module.params['pubkey'], features=",".join(module.params['features']) if module.params['features'] is not None else None, + startup=",".join(module.params['startup']) if module.params['startup'] is not None else None, unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']), description=module.params['description'], hookscript=module.params['hookscript'], From b389f8637f3bcc4f794c708379c9e65ff4f26f6f Mon Sep 17 00:00:00 2001 From: Gideon Date: Sun, 24 Mar 2024 15:22:44 -0600 Subject: [PATCH 011/482] =?UTF-8?q?Add=20descriptive=20error=20message=20t?= =?UTF-8?q?o=20Linode=20inventory=20plugin=20file=20checkin=E2=80=A6=20(#8?= =?UTF-8?q?133)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add descriptive error message to Linode inventory plugin file checking, tests * add changelog fragment * Adjust changelog fragment. 
--------- Co-authored-by: Felix Fontein --- ...r-message-for-linode-inventory-plugin.yaml | 3 +++ plugins/inventory/linode.py | 23 +++++++++++++++---- tests/unit/plugins/inventory/test_linode.py | 18 +++++++++++++-- 3 files changed, 37 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml diff --git a/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml b/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml new file mode 100644 index 0000000000..755d7ed4fe --- /dev/null +++ b/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml @@ -0,0 +1,3 @@ +bugfixes: + - linode inventory plugin - add descriptive error message for linode inventory plugin (https://github.com/ansible-collections/community.general/pull/8133). + diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 34b1fbaf9d..e9b283e076 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -271,12 +271,25 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): strict=strict) def verify_file(self, path): - """Verify the Linode configuration file.""" + """Verify the Linode configuration file. 
+ + Return true/false if the config-file is valid for this plugin + + Args: + str(path): path to the config + Kwargs: + None + Raises: + None + Returns: + bool(valid): is valid config file""" + valid = False if super(InventoryModule, self).verify_file(path): - endings = ('linode.yaml', 'linode.yml') - if any((path.endswith(ending) for ending in endings)): - return True - return False + if path.endswith(("linode.yaml", "linode.yml")): + valid = True + else: + self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"') + return valid def parse(self, inventory, loader, path, cache=True): """Dynamically parse Linode the cloud inventory.""" diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index a4f556761d..0f239f2dd9 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -37,11 +37,25 @@ def test_missing_access_token_lookup(inventory): assert 'Could not retrieve Linode access token' in error_message -def test_verify_file(tmp_path, inventory): +def test_verify_file_yml(tmp_path, inventory): file = tmp_path / "foobar.linode.yml" file.touch() assert inventory.verify_file(str(file)) is True +def test_verify_file_yaml(tmp_path, inventory): + file = tmp_path / "foobar.linode.yaml" + file.touch() + assert inventory.verify_file(str(file)) is True + + +def test_verify_file_bad_config_yml(inventory): + assert inventory.verify_file("foobar.linode.yml") is False + + +def test_verify_file_bad_config_yaml(inventory): + assert inventory.verify_file("foobar.linode.yaml") is False + + def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.linode.yml') is False + assert inventory.verify_file("foobar.wrongcloud.yml") is False From d62fe154d296f7d738dd4dad0263bd918aa4e607 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 25 Mar 2024 06:17:09 +0100 Subject: [PATCH 012/482] inventory plugins: make data obtained from remote unsafe 
(#8098) Make data obtained from remote unsafe. --- changelogs/fragments/inventory-rce.yml | 6 ++++++ plugins/inventory/cobbler.py | 17 ++++++++------- plugins/inventory/gitlab_runners.py | 7 ++++--- plugins/inventory/icinga2.py | 11 +++++----- plugins/inventory/linode.py | 21 +++++++++++-------- plugins/inventory/lxd.py | 29 +++++++++++++++++--------- plugins/inventory/nmap.py | 2 ++ plugins/inventory/online.py | 17 +++++++++------ plugins/inventory/opennebula.py | 2 ++ plugins/inventory/proxmox.py | 3 ++- plugins/inventory/scaleway.py | 5 +++-- plugins/inventory/stackpath_compute.py | 3 ++- plugins/inventory/virtualbox.py | 13 +++++++----- plugins/inventory/xen_orchestra.py | 3 ++- 14 files changed, 88 insertions(+), 51 deletions(-) create mode 100644 changelogs/fragments/inventory-rce.yml diff --git a/changelogs/fragments/inventory-rce.yml b/changelogs/fragments/inventory-rce.yml new file mode 100644 index 0000000000..9eee6dff52 --- /dev/null +++ b/changelogs/fragments/inventory-rce.yml @@ -0,0 +1,6 @@ +security_fixes: + - "cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, proxmox, scaleway, stackpath_compute, virtualbox, + and xen_orchestra inventory plugin - make sure all data received from the remote servers is marked as unsafe, so remote + code execution by obtaining texts that can be evaluated as templates is not possible + (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, + https://github.com/ansible-collections/community.general/pull/8098)." 
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 8decbea309..8ca36f4264 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -117,6 +117,7 @@ from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_text from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name from ansible.module_utils.six import text_type +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe # xmlrpc try: @@ -274,9 +275,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): for host in self._get_systems(): # Get the FQDN for the host and add it to the right groups if self.inventory_hostname == 'system': - hostname = host['name'] # None + hostname = make_unsafe(host['name']) # None else: - hostname = host['hostname'] # None + hostname = make_unsafe(host['hostname']) # None interfaces = host['interfaces'] if set(host['mgmt_classes']) & set(self.include_mgmt_classes): @@ -296,7 +297,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if ivalue['management'] or not ivalue['static']: this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": - hostname = this_dns_name + hostname = make_unsafe(this_dns_name) self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname)) if hostname == '': @@ -361,18 +362,18 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if ip_address is None and ip_address_first is not None: ip_address = ip_address_first if ip_address is not None: - self.inventory.set_variable(hostname, 'cobbler_ipv4_address', ip_address) + self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address)) if ipv6_address is None and ipv6_address_first is not None: ipv6_address = ipv6_address_first if ipv6_address is not None: - self.inventory.set_variable(hostname, 'cobbler_ipv6_address', ipv6_address) + self.inventory.set_variable(hostname, 'cobbler_ipv6_address', 
make_unsafe(ipv6_address)) if self.get_option('want_facts'): try: - self.inventory.set_variable(hostname, 'cobbler', host) + self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host)) except ValueError as e: self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e))) if self.get_option('want_ip_addresses'): - self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', ip_addresses) - self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', ipv6_addresses) + self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses)) + self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses)) diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index a724a4bc71..536f4bb1b8 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -83,6 +83,7 @@ keyed_groups: from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe try: import gitlab @@ -105,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable): else: runners = gl.runners.all() for runner in runners: - host = str(runner['id']) + host = make_unsafe(str(runner['id'])) ip_address = runner['ip_address'] - host_attrs = vars(gl.runners.get(runner['id']))['_attrs'] + host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs']) self.inventory.add_host(host, group='gitlab_runners') - self.inventory.set_variable(host, 'ansible_host', ip_address) + self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address)) if self.get_option('verbose_output', True): self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs) diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 
6a6bafdb42..6746bb8e0f 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -102,6 +102,7 @@ from ansible.errors import AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable): @@ -240,15 +241,15 @@ class InventoryModule(BaseInventoryPlugin, Constructable): """Convert Icinga2 API data to JSON format for Ansible""" groups_dict = {"_meta": {"hostvars": {}}} for entry in json_data: - host_attrs = entry['attrs'] + host_attrs = make_unsafe(entry['attrs']) if self.inventory_attr == "name": - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "address": # When looking for address for inventory, if missing fallback to object name if host_attrs.get('address', '') != '': - host_name = host_attrs.get('address') + host_name = make_unsafe(host_attrs.get('address')) else: - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "display_name": host_name = host_attrs.get('display_name') if host_attrs['state'] == 0: @@ -265,7 +266,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # If the address attribute is populated, override ansible_host with the value if host_attrs.get('address') != '': self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) - self.inventory.set_variable(host_name, 'hostname', entry.get('name')) + self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name'))) self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name')) self.inventory.set_variable(host_name, 'state', host_attrs['state']) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index e9b283e076..fc79f12c5f 
100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -122,6 +122,7 @@ compose: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe try: @@ -198,20 +199,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _add_instances_to_groups(self): """Add instance names to their dynamic inventory groups.""" for instance in self.instances: - self.inventory.add_host(instance.label, group=instance.group) + self.inventory.add_host(make_unsafe(instance.label), group=instance.group) def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json + hostname = make_unsafe(instance.label) for hostvar_key in hostvars: if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: continue self.inventory.set_variable( - instance.label, + hostname, hostvar_key, - hostvars[hostvar_key] + make_unsafe(hostvars[hostvar_key]) ) if ip_style == 'api': ips = instance.ips.ipv4.public + instance.ips.ipv4.private @@ -220,9 +222,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): for ip_type in set(ip.type for ip in ips): self.inventory.set_variable( - instance.label, + hostname, ip_type, - self._ip_data([ip for ip in ips if ip.type == ip_type]) + make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) ) def _ip_data(self, ip_list): @@ -253,21 +255,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_instances_to_groups() self._add_hostvars_for_instances() for instance in self.instances: - variables = self.inventory.get_host(instance.label).get_vars() + hostname = make_unsafe(instance.label) + variables = self.inventory.get_host(hostname).get_vars() self._add_host_to_composed_groups( self.get_option('groups'), variables, 
- instance.label, + hostname, strict=strict) self._add_host_to_keyed_groups( self.get_option('keyed_groups'), variables, - instance.label, + hostname, strict=strict) self._set_composite_vars( self.get_option('compose'), variables, - instance.label, + hostname, strict=strict) def verify_file(self, path): diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 5b855fc97e..c803f47ddc 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -175,6 +175,7 @@ from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe try: import ipaddress @@ -670,7 +671,7 @@ class InventoryModule(BaseInventoryPlugin): if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') - self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name)) + self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name))) else: self.inventory.set_variable(instance_name, 'ansible_connection', 'local') @@ -696,31 +697,39 @@ class InventoryModule(BaseInventoryPlugin): if self.filter.lower() != instance_state: continue # add instance + instance_name = make_unsafe(instance_name) self.inventory.add_host(instance_name) # add network information self.build_inventory_network(instance_name) # add os v = self._get_data_entry('inventory/{0}/os'.format(instance_name)) if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower()) + self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower())) # add release v = 
self._get_data_entry('inventory/{0}/release'.format(instance_name)) if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower()) + self.inventory.set_variable( + instance_name, 'ansible_lxd_release', make_unsafe(v.lower())) # add profile - self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry('inventory/{0}/profile'.format(instance_name)))) # add state - self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state) + self.inventory.set_variable( + instance_name, 'ansible_lxd_state', make_unsafe(instance_state)) # add type - self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry('inventory/{0}/type'.format(instance_name)))) # add location information if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None' - self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry('inventory/{0}/location'.format(instance_name)))) # add VLAN_ID information if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)): - self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))) # add project - self.inventory.set_variable(instance_name, 'ansible_lxd_project', 
self._get_data_entry('inventory/{0}/project'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry('inventory/{0}/project'.format(instance_name)))) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -993,7 +1002,7 @@ class InventoryModule(BaseInventoryPlugin): for group_name in self.groupby: if not group_name.isalnum(): raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) - group_type(group_name) + group_type(make_unsafe(group_name)) def build_inventory(self): """Build dynamic inventory diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 7fa92ae979..3a28007a31 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -126,6 +126,7 @@ from ansible.errors import AnsibleParserError from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -143,6 +144,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): strict = self.get_option('strict') for host in hosts: + host = make_unsafe(host) hostname = host['name'] self.inventory.add_host(hostname) for var, value in host.items(): diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 3fccd58d2f..b3a9ecd379 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -68,6 +68,7 @@ from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.six.moves.urllib.parse import urljoin +from ansible.utils.unsafe_proxy 
import wrap_var as make_unsafe class InventoryModule(BaseInventoryPlugin): @@ -169,20 +170,20 @@ class InventoryModule(BaseInventoryPlugin): "support" ) for attribute in targeted_attributes: - self.inventory.set_variable(hostname, attribute, host_infos[attribute]) + self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute])) if self.extract_public_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos)) - self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) + self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) if self.extract_private_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos))) if self.extract_os_name(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos))) if self.extract_os_version(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos))) def _filter_host(self, host_infos, hostname_preferences): @@ -201,6 +202,8 @@ class InventoryModule(BaseInventoryPlugin): if not hostname: return + hostname = make_unsafe(hostname) + self.inventory.add_host(host=hostname) self._fill_host_variables(hostname=hostname, host_infos=host_infos) @@ -210,6 +213,8 @@ class InventoryModule(BaseInventoryPlugin): if not 
group: return + group = make_unsafe(group) + self.inventory.add_group(group=group) self.inventory.add_host(group=group, host=hostname) diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 01c0f02485..3babfa2324 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -97,6 +97,7 @@ except ImportError: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.common.text.converters import to_native +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from collections import namedtuple import os @@ -215,6 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): filter_by_label = self.get_option('filter_by_label') servers = self._retrieve_servers(filter_by_label) for server in servers: + server = make_unsafe(server) hostname = server['name'] # check for labels if group_by_labels and server['LABELS']: diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 0725819c10..ed55ef1b6a 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -226,6 +226,7 @@ from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.utils.display import Display +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -334,7 +335,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._cache[self.cache_key][url] = data - return self._cache[self.cache_key][url] + return make_unsafe(self._cache[self.cache_key][url]) def _get_nodes(self): return self._get_json("%s/api2/json/nodes" % self.proxmox_url) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 632f08402f..601129f566 100644 --- 
a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -124,6 +124,7 @@ from ansible_collections.community.general.plugins.module_utils.scaleway import from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.six import raise_from +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe import ansible.module_utils.six.moves.urllib.parse as urllib_parse @@ -279,7 +280,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): zone_info = SCALEWAY_LOCATION[zone] url = _build_server_url(zone_info["api_endpoint"]) - raw_zone_hosts_infos = _fetch_information(url=url, token=token) + raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token)) for host_infos in raw_zone_hosts_infos: @@ -341,4 +342,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable): hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): - self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) + self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 39f880e820..9a556d39e0 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -72,6 +72,7 @@ from ansible.plugins.inventory import ( Cacheable ) from ansible.utils.display import Display +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe display = Display() @@ -271,7 +272,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not cache or cache_needs_update: results = self._query() - self._populate(results) + self._populate(make_unsafe(results)) # If the cache has expired/doesn't exist or # if refresh_inventory/flush cache is used diff --git a/plugins/inventory/virtualbox.py 
b/plugins/inventory/virtualbox.py index c926d8b449..8604808e15 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -62,6 +62,7 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native, to_ from ansible.module_utils.common._collections_compat import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -116,6 +117,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) def _populate_from_cache(self, source_data): + source_data = make_unsafe(source_data) hostvars = source_data.pop('_meta', {}).get('hostvars', {}) for group in source_data: if group == 'all': @@ -162,7 +164,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): v = v.strip() # found host if k.startswith('Name') and ',' not in v: # some setting strings appear in Name - current_host = v + current_host = make_unsafe(v) if current_host not in hostvars: hostvars[current_host] = {} self.inventory.add_host(current_host) @@ -170,12 +172,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # try to get network info netdata = self._query_vbox_data(current_host, netinfo) if netdata: - self.inventory.set_variable(current_host, 'ansible_host', netdata) + self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata)) # found groups elif k == 'Groups': for group in v.split('/'): if group: + group = make_unsafe(group) group = self.inventory.add_group(group) self.inventory.add_child(group, current_host) if group not in cacheable_results: @@ -185,17 +188,17 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): else: # found vars, 
accumulate in hostvars for clean inventory set - pref_k = 'vbox_' + k.strip().replace(' ', '_') + pref_k = make_unsafe('vbox_' + k.strip().replace(' ', '_')) leading_spaces = len(k) - len(k.lstrip(' ')) if 0 < leading_spaces <= 2: if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict): hostvars[current_host][prevkey] = {} - hostvars[current_host][prevkey][pref_k] = v + hostvars[current_host][prevkey][pref_k] = make_unsafe(v) elif leading_spaces > 2: continue else: if v != '': - hostvars[current_host][pref_k] = v + hostvars[current_host][pref_k] = make_unsafe(v) if self._ungrouped_host(current_host, cacheable_results): if 'ungrouped' not in cacheable_results: cacheable_results['ungrouped'] = {'hosts': []} diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 3004ab3432..96dd997701 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -82,6 +82,7 @@ from time import sleep from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -347,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self.protocol = 'ws' objects = self._get_objects() - self._populate(objects) + self._populate(make_unsafe(objects)) From 609f28f791eb516b72fcb64c29b3b946dc295f01 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 25 Mar 2024 06:42:04 +0100 Subject: [PATCH 013/482] snap: disable some tests that take way too long (#8148) * Do not install microk8s in snap tests, as it is too slow. * Do not install cider in snap tests, as it is slow. 
--- tests/integration/targets/snap/tasks/main.yml | 10 ++++++---- tests/integration/targets/snap/tasks/test_channel.yml | 2 ++ .../integration/targets/snap/tasks/test_dangerous.yml | 2 ++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index e96fbde38b..2a683617ae 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -13,10 +13,12 @@ block: - name: Include test ansible.builtin.include_tasks: test.yml - - name: Include test_channel - ansible.builtin.include_tasks: test_channel.yml - - name: Include test_dangerous - ansible.builtin.include_tasks: test_dangerous.yml + # TODO: Find better package to install from a channel - microk8s installation takes multiple minutes, and even removal takes one minute! + # - name: Include test_channel + # ansible.builtin.include_tasks: test_channel.yml + # TODO: Find bettter package to download and install from sources - cider 1.6.0 takes over 35 seconds to install + # - name: Include test_dangerous + # ansible.builtin.include_tasks: test_dangerous.yml - name: Include test_3dash ansible.builtin.include_tasks: test_3dash.yml - name: Include test_empty_list diff --git a/tests/integration/targets/snap/tasks/test_channel.yml b/tests/integration/targets/snap/tasks/test_channel.yml index 63c1d104f9..e9eb19c897 100644 --- a/tests/integration/targets/snap/tasks/test_channel.yml +++ b/tests/integration/targets/snap/tasks/test_channel.yml @@ -3,6 +3,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later +# NOTE This is currently disabled for performance reasons! 
+ - name: Make sure package is not installed (microk8s) community.general.snap: name: microk8s diff --git a/tests/integration/targets/snap/tasks/test_dangerous.yml b/tests/integration/targets/snap/tasks/test_dangerous.yml index 4de6d4e402..8fe4edee0b 100644 --- a/tests/integration/targets/snap/tasks/test_dangerous.yml +++ b/tests/integration/targets/snap/tasks/test_dangerous.yml @@ -3,6 +3,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later +# NOTE This is currently disabled for performance reasons! + - name: Make sure package is not installed (cider) community.general.snap: name: cider From 90c9f20ef8cd330d2dca1a0d3557bd92d22b7054 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 25 Mar 2024 14:35:25 +0100 Subject: [PATCH 014/482] The next release will be 8.6.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 4dbadd2f62..757e6c907f 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 8.5.0 +version: 8.6.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 4f758bfb84c639f3ce092a6dbba30a5e42bbd062 Mon Sep 17 00:00:00 2001 From: Denis Borisov Date: Mon, 25 Mar 2024 20:32:42 +0300 Subject: [PATCH 015/482] java_cert: owner, group, mode arguments enabled (#8116) * java_cert: owner, group, mode arguments enabled * java_cert: sanity fix * add changelog fragment * remove duplication in documentation * refactor change detection * fix indentation * Update changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml Co-authored-by: Felix Fontein * Update plugins/modules/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/java_cert.py Co-authored-by: Felix Fontein * update options --------- Co-authored-by: Felix Fontein --- ...java_cert-enable-owner-group-mode-args.yml | 2 + plugins/modules/java_cert.py | 86 
+++++++++++++------ 2 files changed, 61 insertions(+), 27 deletions(-) create mode 100644 changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml diff --git a/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml b/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml new file mode 100644 index 0000000000..f36c145d74 --- /dev/null +++ b/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_cert - enable ``owner``, ``group``, ``mode``, and other generic file arguments (https://github.com/ansible-collections/community.general/pull/8116). \ No newline at end of file diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py index 3f3e5aa014..72302b12c1 100644 --- a/plugins/modules/java_cert.py +++ b/plugins/modules/java_cert.py @@ -18,6 +18,7 @@ description: and optionally private keys to a given java keystore, or remove them from it. extends_documentation_fragment: - community.general.attributes + - ansible.builtin.files attributes: check_mode: support: full @@ -98,6 +99,24 @@ options: type: str choices: [ absent, present ] default: present + mode: + version_added: 8.5.0 + owner: + version_added: 8.5.0 + group: + version_added: 8.5.0 + seuser: + version_added: 8.5.0 + serole: + version_added: 8.5.0 + setype: + version_added: 8.5.0 + selevel: + version_added: 8.5.0 + unsafe_writes: + version_added: 8.5.0 + attributes: + version_added: 8.5.0 requirements: [openssl, keytool] author: - Adam Hamsik (@haad) @@ -331,6 +350,12 @@ def build_proxy_options(): return proxy_opts +def _update_permissions(module, keystore_path): + """ Updates keystore file attributes as necessary """ + file_args = module.load_file_common_arguments(module.params, path=keystore_path) + return module.set_fs_attributes_if_different(file_args, False) + + def _download_cert_url(module, executable, url, port): """ Fetches the certificate from the remote URL using `keytool -printcert...` The PEM 
formatted string is returned """ @@ -375,15 +400,15 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia # Use local certificate from local path and import it to a java keystore (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) - diff = {'before': '\n', 'after': '%s\n' % keystore_alias} - if import_rc == 0 and os.path.exists(keystore_path): - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) - else: + + if import_rc != 0 or not os.path.exists(keystore_path): module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): ''' Import certificate from path into keystore located on @@ -408,17 +433,17 @@ def import_cert_path(module, executable, path, keystore_path, keystore_pass, ali (import_rc, import_out, import_err) = module.run_command(import_cmd, data="%s\n%s" % (keystore_pass, keystore_pass), check_rc=False) - diff = {'before': '\n', 'after': '%s\n' % alias} - if import_rc == 0: - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) - else: - module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) + + if import_rc != 0: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) -def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True): +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): ''' Delete certificate identified with alias from 
keystore on keystore_path ''' del_cmd = [ executable, @@ -434,13 +459,13 @@ def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystor # Delete SSL certificate from keystore (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) + diff = {'before': '%s\n' % alias, 'after': None} - if exit_after: - diff = {'before': '%s\n' % alias, 'after': None} + if del_rc != 0: + module.fail_json(msg=del_out, rc=del_rc, cmd=del_cmd, error=del_err) - module.exit_json(changed=True, msg=del_out, - rc=del_rc, cmd=del_cmd, stdout=del_out, - error=del_err, diff=diff) + return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd, + stdout=del_out, error=del_err, diff=diff) def test_keytool(module, executable): @@ -485,6 +510,7 @@ def main(): ['cert_url', 'cert_path', 'pkcs12_path'] ], supports_check_mode=True, + add_file_common_args=True, ) url = module.params.get('cert_url') @@ -526,12 +552,14 @@ def main(): module.add_cleanup_file(new_certificate) module.add_cleanup_file(old_certificate) + result = dict() + if state == 'absent' and alias_exists: if module.check_mode: module.exit_json(changed=True) - # delete and exit - delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + # delete + result = delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) # dump certificate to enroll in the keystore on disk and compute digest if state == 'present': @@ -569,16 +597,20 @@ def main(): if alias_exists: # The certificate in the keystore does not match with the one we want to be present # The existing certificate must first be deleted before we insert the correct one - delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) if pkcs12_path: - import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, - 
keystore_path, keystore_pass, cert_alias, keystore_type) + result = import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) else: - import_cert_path(module, executable, new_certificate, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) + result = import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) - module.exit_json(changed=False) + if os.path.exists(keystore_path): + changed_permissions = _update_permissions(module, keystore_path) + result['changed'] = result.get('changed', False) or changed_permissions + + module.exit_json(**result) if __name__ == "__main__": From b2b8fc30bf9bbccb8cab06a21833d2d7a179435b Mon Sep 17 00:00:00 2001 From: RayJin2000 Date: Thu, 28 Mar 2024 07:35:46 +0100 Subject: [PATCH 016/482] HAProxy skips the wait when drain=true and the backend is down - fix issue 8092 (#8100) * fix issue 8092 * "is not" => "!=" * moved the drain & down cause to the wait * added changelogs for PR 8100 * fixed yaml * fixed file type * Apply suggestions from code review Removed a dot from a comment Co-authored-by: Felix Fontein * Update plugins/modules/haproxy.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8100-haproxy-drain-fails-on-down-backend.yml | 2 ++ plugins/modules/haproxy.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml diff --git a/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml b/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml new file mode 100644 index 0000000000..58f1478914 --- /dev/null +++ b/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml @@ -0,0 +1,2 @@ +bugfixes: + - "haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable 
(https://github.com/ansible-collections/community.general/issues/8092)." diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py index 05f52d55c8..cbaa438334 100644 --- a/plugins/modules/haproxy.py +++ b/plugins/modules/haproxy.py @@ -343,7 +343,7 @@ class HAProxy(object): if state is not None: self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) - if self.wait: + if self.wait and not (wait_for_status == "DRAIN" and state == "DOWN"): self.wait_until_status(backend, svname, wait_for_status) def get_state_for(self, pxname, svname): From b4635719020fc3e5f0cd6eebd9be53164d92ac8c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 29 Mar 2024 19:10:42 +0100 Subject: [PATCH 017/482] gitlab modules: do not crash if python-gitlab isn't there (#8158) Do not crash if python-gitlab isn't there. --- changelogs/fragments/8158-gitlab-version-check.yml | 2 ++ plugins/module_utils/gitlab.py | 13 ++++++++++--- plugins/modules/gitlab_issue.py | 8 +------- plugins/modules/gitlab_label.py | 12 ++---------- plugins/modules/gitlab_milestone.py | 12 ++---------- 5 files changed, 17 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/8158-gitlab-version-check.yml diff --git a/changelogs/fragments/8158-gitlab-version-check.yml b/changelogs/fragments/8158-gitlab-version-check.yml new file mode 100644 index 0000000000..046bca938f --- /dev/null +++ b/changelogs/fragments/8158-gitlab-version-check.yml @@ -0,0 +1,2 @@ +bugfixes: + - "gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158)." 
diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index f9872b877f..b1354d8a9d 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -81,16 +81,23 @@ def find_group(gitlab_instance, identifier): return group -def ensure_gitlab_package(module): +def ensure_gitlab_package(module, min_version=None): if not HAS_GITLAB_PACKAGE: module.fail_json( msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR ) + gitlab_version = gitlab.__version__ + if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version): + module.fail_json( + msg="This module requires python-gitlab Python module >= %s " + "(installed version: %s). Please upgrade python-gitlab to version %s or above." + % (min_version, gitlab_version, min_version) + ) -def gitlab_authentication(module): - ensure_gitlab_package(module) +def gitlab_authentication(module, min_version=None): + ensure_gitlab_package(module, min_version=min_version) gitlab_url = module.params['api_url'] validate_certs = module.params['validate_certs'] diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py index 6d95bf6cff..3277c4f1aa 100644 --- a/plugins/modules/gitlab_issue.py +++ b/plugins/modules/gitlab_issue.py @@ -143,7 +143,6 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.common.text.converters import to_native, to_text -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group ) @@ -330,13 +329,8 @@ def main(): state_filter = module.params['state_filter'] title = module.params['title'] - gitlab_version = gitlab.__version__ - if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): - 
module.fail_json(msg="community.general.gitlab_issue requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." - " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) - # check prerequisites and connect to gitlab server - gitlab_instance = gitlab_authentication(module) + gitlab_instance = gitlab_authentication(module, min_version='2.3.0') this_project = find_project(gitlab_instance, project) if this_project is None: diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py index f2c8393f22..635033ab6c 100644 --- a/plugins/modules/gitlab_label.py +++ b/plugins/modules/gitlab_label.py @@ -222,9 +222,8 @@ labels_obj: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project, gitlab + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project ) @@ -450,14 +449,7 @@ def main(): label_list = module.params['labels'] state = module.params['state'] - gitlab_version = gitlab.__version__ - _min_gitlab = '3.2.0' - if LooseVersion(gitlab_version) < LooseVersion(_min_gitlab): - module.fail_json(msg="community.general.gitlab_label requires python-gitlab Python module >= %s " - "(installed version: [%s]). Please upgrade " - "python-gitlab to version %s or above." 
% (_min_gitlab, gitlab_version, _min_gitlab)) - - gitlab_instance = gitlab_authentication(module) + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') # find_project can return None, but the other must exist gitlab_project_id = find_project(gitlab_instance, gitlab_project) diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py index 0a616ea475..4b8b933cc0 100644 --- a/plugins/modules/gitlab_milestone.py +++ b/plugins/modules/gitlab_milestone.py @@ -206,9 +206,8 @@ milestones_obj: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project, gitlab + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project ) from datetime import datetime @@ -452,14 +451,7 @@ def main(): milestone_list = module.params['milestones'] state = module.params['state'] - gitlab_version = gitlab.__version__ - _min_gitlab = '3.2.0' - if LooseVersion(gitlab_version) < LooseVersion(_min_gitlab): - module.fail_json(msg="community.general.gitlab_milestone requires python-gitlab Python module >= %s " - "(installed version: [%s]). Please upgrade " - "python-gitlab to version %s or above." 
% (_min_gitlab, gitlab_version, _min_gitlab)) - - gitlab_instance = gitlab_authentication(module) + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') # find_project can return None, but the other must exist gitlab_project_id = find_project(gitlab_instance, gitlab_project) From b444e8739c4a445311709420b47026a9ebe76ac2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 30 Mar 2024 22:32:51 +0100 Subject: [PATCH 018/482] xml: make module work with lxml 5.1.1 (#8169) Make module work with lxml 5.1.1. --- changelogs/fragments/8169-lxml.yml | 2 ++ plugins/modules/xml.py | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8169-lxml.yml diff --git a/changelogs/fragments/8169-lxml.yml b/changelogs/fragments/8169-lxml.yml new file mode 100644 index 0000000000..e2c1b8b952 --- /dev/null +++ b/changelogs/fragments/8169-lxml.yml @@ -0,0 +1,2 @@ +bugfixes: + - "xml - make module work with lxml 5.1.1, which removed some internals that the module was relying on (https://github.com/ansible-collections/community.general/pull/8169)." 
diff --git a/plugins/modules/xml.py b/plugins/modules/xml.py index a3c12b8eec..f5cdbeac38 100644 --- a/plugins/modules/xml.py +++ b/plugins/modules/xml.py @@ -436,11 +436,16 @@ def is_attribute(tree, xpath, namespaces): """ Test if a given xpath matches and that match is an attribute An xpath attribute search will only match one item""" + + # lxml 5.1.1 removed etree._ElementStringResult, so we can no longer simply assume it's there + # (https://github.com/lxml/lxml/commit/eba79343d0e7ad1ce40169f60460cdd4caa29eb3) + ElementStringResult = getattr(etree, '_ElementStringResult', None) + if xpath_matches(tree, xpath, namespaces): match = tree.xpath(xpath, namespaces=namespaces) - if isinstance(match[0], etree._ElementStringResult): + if isinstance(match[0], etree._ElementUnicodeResult): return True - elif isinstance(match[0], etree._ElementUnicodeResult): + elif ElementStringResult is not None and isinstance(match[0], ElementStringResult): return True return False From 48b5a7a80a17e01853423c886b2abe0aedc0323e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 3 Apr 2024 07:57:25 +0200 Subject: [PATCH 019/482] CI: add stable-2.17, bump devel to 2.18, move stable-2.14 from AZP to GHA (#8179) * Add stable-2.17 to CI; add ignores for 2.18. * Move stable-2.14 tests to GHA. * Update README. 
--- .azure-pipelines/azure-pipelines.yml | 138 +++++++++++++-------------- .github/workflows/ansible-test.yml | 21 ++++ README.md | 2 +- tests/sanity/ignore-2.18.txt | 17 ++++ tests/sanity/ignore-2.18.txt.license | 3 + 5 files changed, 108 insertions(+), 73 deletions(-) create mode 100644 tests/sanity/ignore-2.18.txt create mode 100644 tests/sanity/ignore-2.18.txt.license diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 163d71b628..fce33f6346 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -73,6 +73,19 @@ stages: - test: 3 - test: 4 - test: extra + - stage: Sanity_2_17 + displayName: Sanity 2.17 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.17/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 - stage: Sanity_2_16 displayName: Sanity 2.16 dependsOn: [] @@ -99,19 +112,6 @@ stages: - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_14 - displayName: Sanity 2.14 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.14/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 ### Units - stage: Units_devel displayName: Units devel @@ -128,6 +128,17 @@ stages: - test: '3.10' - test: '3.11' - test: '3.12' + - stage: Units_2_17 + displayName: Units 2.17 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.17/units/{0}/1 + targets: + - test: 3.7 + - test: "3.12" - stage: Units_2_16 displayName: Units 2.16 dependsOn: [] @@ -151,16 +162,6 @@ stages: targets: - test: 3.5 - test: "3.10" - - stage: Units_2_14 - displayName: Units 2.14 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.14/units/{0}/1 - targets: - - test: 3.9 ## Remote - stage: Remote_devel_extra_vms @@ -191,14 +192,26 @@ stages: test: macos/14.3 - 
name: RHEL 9.3 test: rhel/9.3 - - name: FreeBSD 13.3 - test: freebsd/13.3 - name: FreeBSD 14.0 test: freebsd/14.0 groups: - 1 - 2 - 3 + - stage: Remote_2_17 + displayName: Remote 2.17 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.17/{0} + targets: + - name: FreeBSD 13.3 + test: freebsd/13.3 + groups: + - 1 + - 2 + - 3 - stage: Remote_2_16 displayName: Remote 2.16 dependsOn: [] @@ -241,24 +254,6 @@ stages: - 1 - 2 - 3 - - stage: Remote_2_14 - displayName: Remote 2.14 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.14/{0} - targets: - #- name: macOS 12.0 - # test: macos/12.0 - - name: RHEL 9.0 - test: rhel/9.0 - #- name: FreeBSD 12.4 - # test: freebsd/12.4 - groups: - - 1 - - 2 - - 3 ### Docker - stage: Docker_devel @@ -275,6 +270,18 @@ stages: test: ubuntu2004 - name: Ubuntu 22.04 test: ubuntu2204 + groups: + - 1 + - 2 + - 3 + - stage: Docker_2_17 + displayName: Docker 2.17 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.17/linux/{0} + targets: - name: Alpine 3.19 test: alpine319 groups: @@ -315,20 +322,6 @@ stages: - 1 - 2 - 3 - - stage: Docker_2_14 - displayName: Docker 2.14 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.14/linux/{0} - targets: - - name: Alpine 3 - test: alpine3 - groups: - - 1 - - 2 - - 3 ### Community Docker - stage: Docker_community_devel @@ -359,6 +352,17 @@ stages: parameters: nameFormat: Python {0} testFormat: devel/generic/{0}/1 + targets: + - test: '3.8' + - test: '3.11' + - stage: Generic_2_17 + displayName: Generic 2.17 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.17/generic/{0}/1 targets: - test: '3.7' - test: '3.12' @@ -384,42 +388,32 @@ stages: testFormat: 2.15/generic/{0}/1 targets: - test: '3.9' - - stage: Generic_2_14 - displayName: Generic 2.14 - dependsOn: [] - jobs: - - template: 
templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.14/generic/{0}/1 - targets: - - test: '3.10' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel + - Sanity_2_17 - Sanity_2_16 - Sanity_2_15 - - Sanity_2_14 - Units_devel + - Units_2_17 - Units_2_16 - Units_2_15 - - Units_2_14 - Remote_devel_extra_vms - Remote_devel + - Remote_2_17 - Remote_2_16 - Remote_2_15 - - Remote_2_14 - Docker_devel + - Docker_2_17 - Docker_2_16 - Docker_2_15 - - Docker_2_14 - Docker_community_devel # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - Generic_devel +# - Generic_2_17 # - Generic_2_16 # - Generic_2_15 -# - Generic_2_14 jobs: - template: templates/coverage.yml diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index bc9daaa43e..ecfc365655 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -30,6 +30,7 @@ jobs: matrix: ansible: - '2.13' + - '2.14' # Ansible-test on various stable branches does not yet work well with cgroups v2. # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 # image for these stable branches. The list of branches where this is necessary will @@ -72,6 +73,8 @@ jobs: python: '2.7' - ansible: '2.13' python: '3.8' + - ansible: '2.14' + python: '3.9' steps: - name: >- @@ -148,11 +151,29 @@ jobs: docker: alpine3 python: '' target: azp/posix/3/ + # 2.14 + - ansible: '2.14' + docker: alpine3 + python: '' + target: azp/posix/1/ + - ansible: '2.14' + docker: alpine3 + python: '' + target: azp/posix/2/ + - ansible: '2.14' + docker: alpine3 + python: '' + target: azp/posix/3/ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - ansible: '2.13' # docker: default # python: '3.9' # target: azp/generic/1/ + # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
+ # - ansible: '2.14' + # docker: default + # python: '3.10' + # target: azp/generic/1/ steps: - name: >- diff --git a/README.md b/README.md index 96b3c952db..dd1a50b0ec 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https: ## Tested with Ansible -Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. +Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt new file mode 100644 index 0000000000..d75aaeac27 --- /dev/null +++ b/tests/sanity/ignore-2.18.txt @@ -0,0 +1,17 @@ +plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice +plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt' +plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin +plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen +plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice +plugins/modules/parted.py validate-modules:parameter-state-invalid-choice +plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 +plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 +plugins/modules/rax.py use-argspec-type-path # module 
deprecated - removed in 9.0.0 +plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice +plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' +plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.18.txt.license b/tests/sanity/ignore-2.18.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/sanity/ignore-2.18.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project From e4e091accaa1d8ecbd7370db6c433bae7a81ce6d Mon Sep 17 00:00:00 2001 From: Boris Glimcher <36732377+glimchb@users.noreply.github.com> Date: Wed, 3 Apr 2024 07:47:00 -0400 Subject: [PATCH 020/482] Redfish: implementing ResetToDefaults (#8164) Fixing #8163 Signed-off-by: Boris Glimcher --- ...redfish-implementing-reset-to-defaults.yml | 2 + plugins/module_utils/redfish_utils.py | 48 +++++++++++++++++++ plugins/modules/redfish_command.py | 17 +++++++ 3 files changed, 67 insertions(+) create mode 100644 changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml diff --git a/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml b/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml new file mode 100644 index 0000000000..212ecc9fd8 --- /dev/null +++ b/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add command ``ResetToDefaults`` to reset manager to default 
state (https://github.com/ansible-collections/community.general/issues/8163). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 4c20571295..76d73fad25 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1149,6 +1149,54 @@ class RedfishUtils(object): return response return {'ret': True, 'changed': True} + def manager_reset_to_defaults(self, command): + return self.reset_to_defaults(command, self.manager_uri, + '#Manager.ResetToDefaults') + + def reset_to_defaults(self, command, resource_uri, action_name): + key = "Actions" + reset_type_values = ['ResetAll', + 'PreserveNetworkAndUsers', + 'PreserveNetwork'] + + if command not in reset_type_values: + return {'ret': False, 'msg': 'Invalid Command (%s)' % command} + + # read the resource and get the current power state + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + data = response['data'] + + # get the reset Action and target URI + if key not in data or action_name not in data[key]: + return {'ret': False, 'msg': 'Action %s not found' % action_name} + reset_action = data[key][action_name] + if 'target' not in reset_action: + return {'ret': False, + 'msg': 'target URI missing from Action %s' % action_name} + action_uri = reset_action['target'] + + # get AllowableValues + ai = self._get_all_action_info_values(reset_action) + allowable_values = ai.get('ResetType', {}).get('AllowableValues', []) + + # map ResetType to an allowable value if needed + if allowable_values and command not in allowable_values: + return {'ret': False, + 'msg': 'Specified reset type (%s) not supported ' + 'by service. 
Supported types: %s' % + (command, allowable_values)} + + # define payload + payload = {'ResetType': command} + + # POST to Action URI + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + def _find_account_uri(self, username=None, acct_id=None): if not any((username, acct_id)): return {'ret': False, 'msg': diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index e66380493c..06224235a8 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -281,6 +281,12 @@ options: - BIOS attributes that needs to be verified in the given server. type: dict version_added: 6.4.0 + reset_to_defaults_mode: + description: + - Mode to apply when reseting to default. + type: str + choices: [ ResetAll, PreserveNetworkAndUsers, PreserveNetwork ] + version_added: 8.6.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -714,6 +720,13 @@ EXAMPLES = ''' command: PowerReboot resource_id: BMC + - name: Factory reset manager to defaults + community.general.redfish_command: + category: Manager + command: ResetToDefaults + resource_id: BMC + reset_to_defaults_mode: ResetAll + - name: Verify BIOS attributes community.general.redfish_command: category: Systems @@ -764,6 +777,7 @@ CATEGORY_COMMANDS_ALL = { "UpdateAccountServiceProperties"], "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", + "ResetToDefaults", "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], "Update": ["SimpleUpdate", "MultipartHTTPPushUpdate", "PerformRequestedOperations"], @@ -825,6 +839,7 @@ def main(): ) ), strip_etag_quotes=dict(type='bool', default=False), + reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), bios_attributes=dict(type="dict") ), 
required_together=[ @@ -1017,6 +1032,8 @@ def main(): result = rf_utils.virtual_media_insert(virtual_media, category) elif command == 'VirtualMediaEject': result = rf_utils.virtual_media_eject(virtual_media, category) + elif command == 'ResetToDefaults': + result = rf_utils.manager_reset_to_defaults(module.params['reset_to_defaults_mode']) elif category == "Update": # execute only if we find UpdateService resources From 13d0310e91b3afb4e13a28d6d85a153ee99b4b71 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 4 Apr 2024 22:51:32 +0200 Subject: [PATCH 021/482] CI: ansible-core devel removed Python 3.7 support, no longer allows 'vars:' with lists (#8190) * Ansible-core devel removed Python 3.7 support. * Do not use 'vars' with lists. --- .azure-pipelines/azure-pipelines.yml | 1 - tests/integration/targets/lookup_lmdb_kv/test.yml | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index fce33f6346..be8f011bdf 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -122,7 +122,6 @@ stages: nameFormat: Python {0} testFormat: devel/units/{0}/1 targets: - - test: 3.7 - test: 3.8 - test: 3.9 - test: '3.10' diff --git a/tests/integration/targets/lookup_lmdb_kv/test.yml b/tests/integration/targets/lookup_lmdb_kv/test.yml index 217c020cac..8a88bca456 100644 --- a/tests/integration/targets/lookup_lmdb_kv/test.yml +++ b/tests/integration/targets/lookup_lmdb_kv/test.yml @@ -19,13 +19,13 @@ - item.0 == 'nl' - item.1 == 'Netherlands' vars: - - lmdb_kv_db: jp.mdb + lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - n* - assert: that: - item == 'Belgium' vars: - - lmdb_kv_db: jp.mdb + lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - be From 610ecf9bf552717e34b6677328f83df3523e048c Mon Sep 17 00:00:00 2001 From: Herschdorfer Date: Sat, 6 Apr 2024 12:17:43 +0200 Subject: [PATCH 022/482] updated lxd_container.py docs (#8168) 
linuxcontainer.org has phased out LXC/LXD support. This edit reflects part of it in the docs. --- plugins/modules/lxd_container.py | 41 +++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index 9fd1b183be..b82e2be9b7 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -86,8 +86,8 @@ options: source: description: - 'The source for the instance - (for example V({ "type": "image", "mode": "pull", "server": "https://images.linuxcontainers.org", - "protocol": "lxd", "alias": "ubuntu/xenial/amd64" })).' + (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/", + "protocol": "simplestreams", "alias": "22.04" })).' - 'See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.' - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).' required: false @@ -205,6 +205,9 @@ notes: - You can copy a file in the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename). See the first example below. + - linuxcontainers.org has phased out LXC/LXD support with March 2024 + (U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)). + Currently only Ubuntu is still providing images. 
''' EXAMPLES = ''' @@ -220,9 +223,9 @@ EXAMPLES = ''' source: type: image mode: pull - server: https://images.linuxcontainers.org - protocol: lxd # if you get a 404, try setting protocol: simplestreams - alias: ubuntu/xenial/amd64 + server: https://cloud-images.ubuntu.com/releases/ + protocol: simplestreams + alias: "22.04" profiles: ["default"] wait_for_ipv4_addresses: true timeout: 600 @@ -264,6 +267,26 @@ EXAMPLES = ''' wait_for_ipv4_addresses: true timeout: 600 +# An example of creating a ubuntu-minial container +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + # Provides Ubuntu minimal images + server: https://cloud-images.ubuntu.com/minimal/releases/ + protocol: simplestreams + alias: "22.04" + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + # An example for creating container in project other than default - hosts: localhost connection: local @@ -278,8 +301,8 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: https://images.linuxcontainers.org - alias: ubuntu/20.04/cloud + server: https://cloud-images.ubuntu.com/releases/ + alias: "22.04" profiles: ["default"] wait_for_ipv4_addresses: true timeout: 600 @@ -347,7 +370,7 @@ EXAMPLES = ''' source: type: image mode: pull - alias: ubuntu/xenial/amd64 + alias: "22.04" target: node01 - name: Create container on another node @@ -358,7 +381,7 @@ EXAMPLES = ''' source: type: image mode: pull - alias: ubuntu/xenial/amd64 + alias: "22.04" target: node02 # An example for creating a virtual machine From bc2ff24f74aa4f9120f2f6a8d2d256fdfcd83871 Mon Sep 17 00:00:00 2001 From: Kris Matthews Date: Mon, 8 Apr 2024 16:06:55 -0400 Subject: [PATCH 023/482] Add check_type option, to allow defaults type changes (#8173) * Add check_type option, to allow defaults type changes * Add changelog fragment * Changelog 
fragments are yaml, not markdown * Update changelogs/fragments/8173-osx_defaults-check_type.yml Co-authored-by: Felix Fontein * Update plugins/modules/osx_defaults.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8173-osx_defaults-check_type.yml | 2 ++ plugins/modules/osx_defaults.py | 18 ++++++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8173-osx_defaults-check_type.yml diff --git a/changelogs/fragments/8173-osx_defaults-check_type.yml b/changelogs/fragments/8173-osx_defaults-check_type.yml new file mode 100644 index 0000000000..a35f609bf3 --- /dev/null +++ b/changelogs/fragments/8173-osx_defaults-check_type.yml @@ -0,0 +1,2 @@ +minor_changes: + - osx_defaults - add option ``check_types`` to enable changing the type of existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173). diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py index 336e953320..db5d889a37 100644 --- a/plugins/modules/osx_defaults.py +++ b/plugins/modules/osx_defaults.py @@ -50,6 +50,13 @@ options: type: str choices: [ array, bool, boolean, date, float, int, integer, string ] default: string + check_type: + description: + - Checks if the type of the provided O(value) matches the type of an existing default. + - If the types do not match, raises an error. + type: bool + default: true + version_added: 8.6.0 array_add: description: - Add new elements to the array for a key which has an array as its value. @@ -158,6 +165,7 @@ class OSXDefaults(object): self.domain = module.params['domain'] self.host = module.params['host'] self.key = module.params['key'] + self.check_type = module.params['check_type'] self.type = module.params['type'] self.array_add = module.params['array_add'] self.value = module.params['value'] @@ -349,10 +357,11 @@ class OSXDefaults(object): self.delete() return True - # There is a type mismatch! 
Given type does not match the type in defaults - value_type = type(self.value) - if self.current_value is not None and not isinstance(self.current_value, value_type): - raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__) + # Check if there is a type mismatch, e.g. given type does not match the type in defaults + if self.check_type: + value_type = type(self.value) + if self.current_value is not None and not isinstance(self.current_value, value_type): + raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__) # Current value matches the given value. Nothing need to be done. Arrays need extra care if self.type == "array" and self.current_value is not None and not self.array_add and \ @@ -383,6 +392,7 @@ def main(): domain=dict(type='str', default='NSGlobalDomain'), host=dict(type='str'), key=dict(type='str', no_log=False), + check_type=dict(type='bool', default=True), type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), array_add=dict(type='bool', default=False), value=dict(type='raw'), From b674f94f646e3050eebd2fb1322ef93a1eb98a83 Mon Sep 17 00:00:00 2001 From: Dmitriy Usachev Date: Tue, 9 Apr 2024 08:40:10 +0300 Subject: [PATCH 024/482] module_utils/ipa.py: fix regex when parsing version (#8175) * module_utils/ipa.py: fix regex when parsing version * add changelog fragments * Update changelogs/fragments/8175-get_ipa_version_regex.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Dmitriy Usachev Co-authored-by: Felix Fontein --- changelogs/fragments/8175-get_ipa_version_regex.yml | 2 ++ plugins/module_utils/ipa.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8175-get_ipa_version_regex.yml diff --git a/changelogs/fragments/8175-get_ipa_version_regex.yml b/changelogs/fragments/8175-get_ipa_version_regex.yml new file mode 100644 index 
0000000000..e2a51d1b91 --- /dev/null +++ b/changelogs/fragments/8175-get_ipa_version_regex.yml @@ -0,0 +1,2 @@ +bugfixes: + - ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175). diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index eda9b4132b..fb63d5556b 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -104,7 +104,7 @@ class IPAClient(object): def get_ipa_version(self): response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') + ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*') version_match = ipa_ver_regex.match(response) ipa_version = None if version_match: From 39ef949f27d17922f93dd43c80bbeb8b0d6bbc82 Mon Sep 17 00:00:00 2001 From: Codey Schoettle <165081359+c-cschoettle@users.noreply.github.com> Date: Tue, 9 Apr 2024 01:42:19 -0400 Subject: [PATCH 025/482] Update nmcli.py to support OVS commands (#8154) * Update nmcli.py to support OVS commands Adding Openvswitch command support and documentation to the nmcli module * Fixed versioning and documentation, added changelog fragment * Update changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml Co-authored-by: Felix Fontein * Update plugins/modules/nmcli.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8154-add-ovs-commands-to-nmcli-module.yml | 2 + plugins/modules/nmcli.py | 47 +++++++++++++++++-- 2 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml diff --git a/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml b/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml new file mode 100644 index 0000000000..d1fb344ba5 --- /dev/null +++ b/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - adds OpenvSwitch support with new ``type`` values 
``ovs-port``, ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` (https://github.com/ansible-collections/community.general/pull/8154). \ No newline at end of file diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 9360ce37d3..6f0884da92 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -64,13 +64,16 @@ options: - Type V(infiniband) is added in community.general 2.0.0. - Type V(loopback) is added in community.general 8.1.0. - Type V(macvlan) is added in community.general 6.6.0. + - Type V(ovs-bridge) is added in community.general 8.6.0. + - Type V(ovs-interface) is added in community.general 8.6.0. + - Type V(ovs-port) is added in community.general 8.6.0. - Type V(wireguard) is added in community.general 4.3.0. - Type V(vpn) is added in community.general 5.1.0. - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option. - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) option. type: str choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan, - wifi, gsm, wireguard, vpn, loopback ] + wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, loopback ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. @@ -86,12 +89,13 @@ options: slave_type: description: - Type of the device of this slave's master connection (for example V(bond)). + - Type V(ovs-port) is added in community.general 8.6.0. 
type: str - choices: [ 'bond', 'bridge', 'team' ] + choices: [ 'bond', 'bridge', 'team', 'ovs-port' ] version_added: 7.0.0 master: description: - - Master Date: Tue, 9 Apr 2024 13:44:21 +0800 Subject: [PATCH 026/482] fix(aix_filesystem): remove extra param from running lsvg (#8176) * fix(aix_filesystem): remove extra param from running lsvg * chore: add new line to changlog file * Update 8151-fix-lsvg_cmd-failed.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8151-fix-lsvg_cmd-failed.yml | 2 ++ plugins/modules/aix_filesystem.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8151-fix-lsvg_cmd-failed.yml diff --git a/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml b/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml new file mode 100644 index 0000000000..0eeee752df --- /dev/null +++ b/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml @@ -0,0 +1,2 @@ +bugfixes: + - aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` (https://github.com/ansible-collections/community.general/issues/8151). diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py index 6abf6317f2..4a3775c672 100644 --- a/plugins/modules/aix_filesystem.py +++ b/plugins/modules/aix_filesystem.py @@ -242,7 +242,7 @@ def _validate_vg(module, vg): if rc != 0: module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"]) + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) if rc != 0: module.fail_json(msg="Failed executing %s command." 
% lsvg_cmd) From 9307b76e744661ef8d62bf829f02393100e8ba91 Mon Sep 17 00:00:00 2001 From: Steffen Scheib <37306894+sscheib@users.noreply.github.com> Date: Tue, 9 Apr 2024 08:01:44 +0200 Subject: [PATCH 027/482] fix: Ensuring interpolation is disabled for ConfigParser (#8185) * fix: Ensuring interpolation is disabled for ConfigParser This PR disables interpolation of ConfigParser and adds test coverage for that. * Adding changelog fragment * Fixing missing extension of changelog fragment * Adding issue link to changelog fragment * Update changelogs/fragments/8183-from_ini_to_ini.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8183-from_ini_to_ini.yml | 3 +++ plugins/filter/from_ini.py | 2 +- plugins/filter/to_ini.py | 2 +- .../integration/targets/filter_from_ini/tasks/main.yml | 10 ++++++++-- tests/integration/targets/filter_to_ini/tasks/main.yml | 6 ++++++ 5 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8183-from_ini_to_ini.yml diff --git a/changelogs/fragments/8183-from_ini_to_ini.yml b/changelogs/fragments/8183-from_ini_to_ini.yml new file mode 100644 index 0000000000..1ff455f6ee --- /dev/null +++ b/changelogs/fragments/8183-from_ini_to_ini.yml @@ -0,0 +1,3 @@ +bugfixes: + - "to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." + - "from_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." 
diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py index d68b51092e..6fe83875e6 100644 --- a/plugins/filter/from_ini.py +++ b/plugins/filter/from_ini.py @@ -57,7 +57,7 @@ class IniParser(ConfigParser): ''' Implements a configparser which is able to return a dict ''' def __init__(self): - super().__init__() + super().__init__(interpolation=None) self.optionxform = str def as_dict(self): diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py index 22ef16d722..bdf2dde270 100644 --- a/plugins/filter/to_ini.py +++ b/plugins/filter/to_ini.py @@ -63,7 +63,7 @@ class IniParser(ConfigParser): ''' Implements a configparser which sets the correct optionxform ''' def __init__(self): - super().__init__() + super().__init__(interpolation=None) self.optionxform = str diff --git a/tests/integration/targets/filter_from_ini/tasks/main.yml b/tests/integration/targets/filter_from_ini/tasks/main.yml index a2eca36a6e..abb92dfc55 100644 --- a/tests/integration/targets/filter_from_ini/tasks/main.yml +++ b/tests/integration/targets/filter_from_ini/tasks/main.yml @@ -12,15 +12,21 @@ another_section: connection: 'ssh' + interpolate_test: + interpolate_test_key: '%' + - name: 'Write INI file that reflects ini_test_dict to {{ ini_test_file }}' ansible.builtin.copy: dest: '{{ ini_test_file }}' content: | [section_name] - key_name=key value + key_name = key value [another_section] - connection=ssh + connection = ssh + + [interpolate_test] + interpolate_test_key = % - name: 'Slurp the test file: {{ ini_test_file }}' ansible.builtin.slurp: diff --git a/tests/integration/targets/filter_to_ini/tasks/main.yml b/tests/integration/targets/filter_to_ini/tasks/main.yml index 877d4471d8..e16aa98a5a 100644 --- a/tests/integration/targets/filter_to_ini/tasks/main.yml +++ b/tests/integration/targets/filter_to_ini/tasks/main.yml @@ -16,6 +16,9 @@ another_section: connection: 'ssh' + interpolate_test: + interpolate_test_key: '%' + - name: 'Write INI file manually to {{ ini_test_file 
}}' ansible.builtin.copy: dest: '{{ ini_test_file }}' @@ -26,6 +29,9 @@ [another_section] connection = ssh + [interpolate_test] + interpolate_test_key = % + - name: 'Slurp the manually created test file: {{ ini_test_file }}' ansible.builtin.slurp: src: '{{ ini_test_file }}' From 6c8f949ba950309931d8931f4820b99707898850 Mon Sep 17 00:00:00 2001 From: Boris Glimcher <36732377+glimchb@users.noreply.github.com> Date: Fri, 12 Apr 2024 01:42:55 -0400 Subject: [PATCH 028/482] Redfish: add `Multipart` bool return value to `FirmwareUpdateCapabilities` (#8195) * Redfish: add Multipart bool return value to FirmwareUpdateCapabilities Fixes #8194 Signed-off-by: Boris Glimcher * Update changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml Co-authored-by: Felix Fontein --------- Signed-off-by: Boris Glimcher Co-authored-by: Felix Fontein --- .../fragments/8194-redfish-add-multipart-to-capabilities.yml | 2 ++ plugins/module_utils/redfish_utils.py | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml diff --git a/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml b/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml new file mode 100644 index 0000000000..6b96d98a7f --- /dev/null +++ b/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 76d73fad25..6935573d0b 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1597,6 +1597,8 @@ class RedfishUtils(object): data = response['data'] + result['multipart_supported'] = 'MultipartHttpPushUri' in data + if "Actions" in data: actions = data['Actions'] if len(actions) > 0: From bafad8ecd4e411076200eb7be7e70f4b5f0d991f Mon Sep 17 00:00:00 2001 From: tobixx Date: Wed, 17 Apr 2024 23:22:22 +0200 Subject: [PATCH 029/482] Support newer 'riak admin' sub-command beside legacy 'riak-admin' command (#8211) * Support newer 'riak admin' sub-command * Added changelog for riak admin sub-command * Added blank line * Apply suggestions from code review Co-authored-by: Felix Fontein * replaced string commands with lists added white space removed white space removed parenthesis * Update changelogs/fragments/8211-riak-admin-sub-command-support.yml Co-authored-by: Don Naro --------- Co-authored-by: Felix Fontein Co-authored-by: Don Naro --- .../8211-riak-admin-sub-command-support.yml | 2 ++ plugins/modules/riak.py | 15 ++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/8211-riak-admin-sub-command-support.yml diff --git a/changelogs/fragments/8211-riak-admin-sub-command-support.yml b/changelogs/fragments/8211-riak-admin-sub-command-support.yml new file mode 100644 index 0000000000..dc6eb00e45 --- /dev/null +++ b/changelogs/fragments/8211-riak-admin-sub-command-support.yml @@ -0,0 +1,2 @@ +bugfixes: + - "riak - support ``riak admin`` sub-command in newer Riak KV versions beside the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211)." 
\ No newline at end of file diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py index fe295d2d6d..438263da22 100644 --- a/plugins/modules/riak.py +++ b/plugins/modules/riak.py @@ -93,7 +93,7 @@ from ansible.module_utils.urls import fetch_url def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin + cmd = riak_admin_bin + ['ringready'] rc, out, err = module.run_command(cmd) if rc == 0 and 'TRUE All nodes agree on the ring' in out: return True @@ -127,6 +127,7 @@ def main(): # make sure riak commands are on the path riak_bin = module.get_bin_path('riak') riak_admin_bin = module.get_bin_path('riak-admin') + riak_admin_bin = [riak_admin_bin] if riak_admin_bin is not None else [riak_bin, 'admin'] timeout = time.time() + 120 while True: @@ -164,7 +165,7 @@ def main(): module.fail_json(msg=out) elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin + cmd = riak_admin_bin + ['test'] rc, out, err = module.run_command(cmd) if rc == 0: result['kv_test'] = out @@ -175,7 +176,7 @@ def main(): if nodes.count(node_name) == 1 and len(nodes) > 1: result['join'] = 'Node is already in cluster or staged to be in cluster.' 
else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) + cmd = riak_admin_bin + ['cluster', 'join', target_node] rc, out, err = module.run_command(cmd) if rc == 0: result['join'] = out @@ -184,7 +185,7 @@ def main(): module.fail_json(msg=out) elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin + cmd = riak_admin_bin + ['cluster', 'plan'] rc, out, err = module.run_command(cmd) if rc == 0: result['plan'] = out @@ -194,7 +195,7 @@ def main(): module.fail_json(msg=out) elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin + cmd = riak_admin_bin + ['cluster', 'commit'] rc, out, err = module.run_command(cmd) if rc == 0: result['commit'] = out @@ -206,7 +207,7 @@ def main(): if wait_for_handoffs: timeout = time.time() + wait_for_handoffs while True: - cmd = '%s transfers' % riak_admin_bin + cmd = riak_admin_bin + ['transfers'] rc, out, err = module.run_command(cmd) if 'No transfers active' in out: result['handoffs'] = 'No transfers active.' @@ -216,7 +217,7 @@ def main(): module.fail_json(msg='Timeout waiting for handoffs.') if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name] + cmd = riak_admin_bin + ['wait_for_service', 'riak_%s' % wait_for_service, node_name] rc, out, err = module.run_command(cmd) result['service'] = out From da29ea151db4a11833400f931a37d744a56dd117 Mon Sep 17 00:00:00 2001 From: Manuel Luzarreta Date: Wed, 17 Apr 2024 23:23:18 +0200 Subject: [PATCH 030/482] passwordstore: Add missing_subkey parameter (#8166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * passwordstore: Add missing_subkey parameter Add ability to trigger error or warning when a subkey is missing in pass file. By default the behavior is unchanged (if subkey is missing, None is returned). 
This option can also be set in ansible.cfg * passwordstore - missing_subkey: Update changelog/fragments file with PR number * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...6-password-store-lookup-missing-subkey.yml | 2 ++ plugins/lookup/passwordstore.py | 31 +++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 changelogs/fragments/8166-password-store-lookup-missing-subkey.yml diff --git a/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml b/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml new file mode 100644 index 0000000000..da5be9c9e0 --- /dev/null +++ b/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml @@ -0,0 +1,2 @@ +minor_changes: + - passwordstore lookup - add ``missing_subkey`` parameter defining the behavior of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166). diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 7a6fca7a01..9814fe133b 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -139,6 +139,21 @@ DOCUMENTATION = ''' type: bool default: true version_added: 8.1.0 + missing_subkey: + description: + - Preference about what to do if the password subkey is missing. + - If set to V(error), the lookup will error out if the subkey does not exist. + - If set to V(empty) or V(warn), will return a V(none) in case the subkey does not exist. + version_added: 8.6.0 + type: str + default: empty + choices: + - error + - warn + - empty + ini: + - section: passwordstore_lookup + key: missing_subkey notes: - The lookup supports passing all options as lookup parameters since community.general 6.0.0. 
''' @@ -147,6 +162,7 @@ ansible.cfg: | [passwordstore_lookup] lock=readwrite locktimeout=45s + missing_subkey=warn tasks.yml: | --- @@ -432,6 +448,20 @@ class LookupModule(LookupBase): if self.paramvals['subkey'] in self.passdict: return self.passdict[self.paramvals['subkey']] else: + if self.paramvals["missing_subkey"] == "error": + raise AnsibleError( + "passwordstore: subkey {0} for passname {1} not found and missing_subkey=error is set".format( + self.paramvals["subkey"], self.passname + ) + ) + + if self.paramvals["missing_subkey"] == "warn": + display.warning( + "passwordstore: subkey {0} for passname {1} not found".format( + self.paramvals["subkey"], self.passname + ) + ) + return None @contextmanager @@ -481,6 +511,7 @@ class LookupModule(LookupBase): 'umask': self.get_option('umask'), 'timestamp': self.get_option('timestamp'), 'preserve': self.get_option('preserve'), + "missing_subkey": self.get_option("missing_subkey"), } def run(self, terms, variables, **kwargs): From 12b76ead2999dffc6739b8f1961ddf67c509e446 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 18 Apr 2024 12:22:48 +0200 Subject: [PATCH 031/482] Include changelog in docsite (#8234) Include changelog in docsite. 
--- docs/docsite/config.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 docs/docsite/config.yml diff --git a/docs/docsite/config.yml b/docs/docsite/config.yml new file mode 100644 index 0000000000..1d6cf8554a --- /dev/null +++ b/docs/docsite/config.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +changelog: + write_changelog: true From a5b2b5ce8ca879a49df69a5fd997f74dd5139c47 Mon Sep 17 00:00:00 2001 From: Anders Stiksrud Helmen Date: Sat, 20 Apr 2024 09:24:00 +0200 Subject: [PATCH 032/482] Add support for docker-v2 protocol in Keycloak modules (#8216) * Add support for docker-v2 protocol in Keycloak modules * use dash instead of underscore for the docker-v2 * Update documentation * Add changelog fragment * fix missing whitespace around operator * Update changelogs/fragments/8215-add-docker-v2-protocol.yml Update changelog fragment to reviewers suggestion, add refrence to issue and pull request Co-authored-by: Felix Fontein * Add documentation about adding docker-v2 value in community general 8.6.0 --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8215-add-docker-v2-protocol.yml | 2 ++ plugins/modules/keycloak_client.py | 10 ++++++---- plugins/modules/keycloak_clientscope.py | 9 +++++---- plugins/modules/keycloak_clienttemplate.py | 9 +++++---- 4 files changed, 18 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/8215-add-docker-v2-protocol.yml diff --git a/changelogs/fragments/8215-add-docker-v2-protocol.yml b/changelogs/fragments/8215-add-docker-v2-protocol.yml new file mode 100644 index 0000000000..6a9cc60556 --- /dev/null +++ b/changelogs/fragments/8215-add-docker-v2-protocol.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` protocol support, enhancing alignment 
with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, https://github.com/ansible-collections/community.general/pull/8216). diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index b151e4541f..0766608b45 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -248,8 +248,9 @@ options: description: - Type of client. - At creation only, default value will be V(openid-connect) if O(protocol) is omitted. + - The V(docker-v2) value was added in community.general 8.6.0. type: str - choices: ['openid-connect', 'saml'] + choices: ['openid-connect', 'saml', 'docker-v2'] full_scope_allowed: description: @@ -393,7 +394,7 @@ options: protocol: description: - This specifies for which protocol this protocol mapper is active. - choices: ['openid-connect', 'saml'] + choices: ['openid-connect', 'saml', 'docker-v2'] type: str protocolMapper: @@ -724,6 +725,7 @@ import copy PROTOCOL_OPENID_CONNECT = 'openid-connect' PROTOCOL_SAML = 'saml' +PROTOCOL_DOCKER_V2 = 'docker-v2' CLIENT_META_DATA = ['authorizationServicesEnabled'] @@ -785,7 +787,7 @@ def main(): consentText=dict(type='str'), id=dict(type='str'), name=dict(type='str'), - protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]), + protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), protocolMapper=dict(type='str'), config=dict(type='dict'), ) @@ -819,7 +821,7 @@ def main(): authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), public_client=dict(type='bool', aliases=['publicClient']), frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), - protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]), + protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), attributes=dict(type='dict'), full_scope_allowed=dict(type='bool', 
aliases=['fullScopeAllowed']), node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index d37af5f0cf..d24e0f1f27 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -79,7 +79,8 @@ options: protocol: description: - Type of client. - choices: ['openid-connect', 'saml', 'wsfed'] + - The V(docker-v2) value was added in community.general 8.6.0. + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] type: str protocol_mappers: @@ -95,7 +96,7 @@ options: description: - This specifies for which protocol this protocol mapper. - is active. - choices: ['openid-connect', 'saml', 'wsfed'] + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] type: str protocolMapper: @@ -330,7 +331,7 @@ def main(): protmapper_spec = dict( id=dict(type='str'), name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), protocolMapper=dict(type='str'), config=dict(type='dict'), ) @@ -341,7 +342,7 @@ def main(): id=dict(type='str'), name=dict(type='str'), description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), attributes=dict(type='dict'), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), ) diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py index cd7f6c09b7..7bffb5cbb6 100644 --- a/plugins/modules/keycloak_clienttemplate.py +++ b/plugins/modules/keycloak_clienttemplate.py @@ -68,7 +68,8 @@ options: protocol: description: - Type of client template. - choices: ['openid-connect', 'saml'] + - The V(docker-v2) value was added in community.general 8.6.0. 
+ choices: ['openid-connect', 'saml', 'docker-v2'] type: str full_scope_allowed: @@ -107,7 +108,7 @@ options: protocol: description: - This specifies for which protocol this protocol mapper is active. - choices: ['openid-connect', 'saml'] + choices: ['openid-connect', 'saml', 'docker-v2'] type: str protocolMapper: @@ -292,7 +293,7 @@ def main(): consentText=dict(type='str'), id=dict(type='str'), name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), protocolMapper=dict(type='str'), config=dict(type='dict'), ) @@ -304,7 +305,7 @@ def main(): id=dict(type='str'), name=dict(type='str'), description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), attributes=dict(type='dict'), full_scope_allowed=dict(type='bool'), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), From 1bbef5884488901a7e5e50c719e2d97c1c9452d6 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 20 Apr 2024 09:24:27 +0200 Subject: [PATCH 033/482] Deprecate hipchat callback (#8189) * Deprecate hipchat callback. * Avoid duplicate 'callback' key. --- changelogs/fragments/hipchat.yml | 4 ++++ meta/runtime.yml | 40 ++++++++++++++++++-------------- plugins/callback/hipchat.py | 4 ++++ 3 files changed, 30 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/hipchat.yml diff --git a/changelogs/fragments/hipchat.yml b/changelogs/fragments/hipchat.yml new file mode 100644 index 0000000000..0260c09c84 --- /dev/null +++ b/changelogs/fragments/hipchat.yml @@ -0,0 +1,4 @@ +deprecated_features: + - "hipchat callback plugin - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. 
+ The callback plugin is therefore deprecated and will be removed from community.general 10.0.0 if nobody provides compelling reasons to still keep it + (https://github.com/ansible-collections/community.general/issues/8184, https://github.com/ansible-collections/community.general/pull/8189)." diff --git a/meta/runtime.yml b/meta/runtime.yml index 1dcd0878a5..27a4bd1ae3 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -13,6 +13,28 @@ action_groups: - consul_session - consul_token plugin_routing: + callback: + actionable: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. + full_skip: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. + hipchat: + deprecation: + removal_version: 10.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + osx_say: + redirect: community.general.say + stderr: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. connection: docker: redirect: community.docker.docker @@ -780,24 +802,6 @@ plugin_routing: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - callback: - actionable: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' and 'display_ok_hosts = no' options. - full_skip: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' option. - osx_say: - redirect: community.general.say - stderr: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr - = yes' option. 
inventory: docker_machine: redirect: community.docker.docker_machine diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index 3e10b69e7f..afd9e20559 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -18,6 +18,10 @@ DOCUMENTATION = ''' description: - This callback plugin sends status updates to a HipChat channel during playbook execution. - Before 2.4 only environment variables were available for configuring this plugin. + deprecated: + removed_in: 10.0.0 + why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + alternative: There is none. options: token: description: HipChat API token for v1 or v2 API. From 1b8e6bc95bd3cc749ea65b8d713b1facc1db0c7e Mon Sep 17 00:00:00 2001 From: Niklas Schwarz Date: Sat, 20 Apr 2024 09:25:26 +0200 Subject: [PATCH 034/482] puppet_utils: Do not force lang for cmd (#8001) puppet_utils: Add option to set LANG for puppet execution Add option `environment_lang` to force the LANG when executing the puppet agent. The default is the `C` LANG --- changelogs/fragments/puppet_lang_force.yml | 3 +++ plugins/module_utils/puppet.py | 1 + plugins/modules/puppet.py | 10 ++++++++++ 3 files changed, 14 insertions(+) create mode 100644 changelogs/fragments/puppet_lang_force.yml diff --git a/changelogs/fragments/puppet_lang_force.yml b/changelogs/fragments/puppet_lang_force.yml new file mode 100644 index 0000000000..b826c8dba4 --- /dev/null +++ b/changelogs/fragments/puppet_lang_force.yml @@ -0,0 +1,3 @@ +bugfixes: + - puppet - add option ``environment_lang`` to set the environment language encoding. Defaults to lang ``C``. It is recommended + to set it to ``C.UTF-8`` or ``en_US.UTF-8`` depending on what is available on your system. 
(https://github.com/ansible-collections/community.general/issues/8000) diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py index 8d553a2d28..f05b0673f6 100644 --- a/plugins/module_utils/puppet.py +++ b/plugins/module_utils/puppet.py @@ -107,5 +107,6 @@ def puppet_runner(module): verbose=cmd_runner_fmt.as_bool("--verbose"), ), check_rc=False, + force_lang=module.params["environment_lang"], ) return runner diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index 86eac062a8..b28583fe05 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -116,6 +116,15 @@ options: - Whether to print file changes details type: bool default: false + environment_lang: + description: + - The lang environment to use when running the puppet agent. + - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output + - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure + the selected locale is supported on the system the puppet agent runs on. + type: str + default: C + version_added: 8.6.0 requirements: - puppet author: @@ -208,6 +217,7 @@ def main(): debug=dict(type='bool', default=False), verbose=dict(type='bool', default=False), use_srv_records=dict(type='bool'), + environment_lang=dict(type='str', default='C'), ), supports_check_mode=True, mutually_exclusive=[ From 524d5883b8f51974284654f6f3292bbd22cfd59f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 20 Apr 2024 09:26:08 +0200 Subject: [PATCH 035/482] Avoid deprecated utcnow() (#8222) Avoid deprecated utcnow(). 
--- changelogs/fragments/8222-datetime.yml | 3 ++ plugins/callback/loganalytics.py | 13 +++++--- plugins/callback/logstash.py | 9 ++++-- plugins/callback/splunk.py | 13 +++++--- plugins/callback/sumologic.py | 14 ++++---- plugins/module_utils/datetime.py | 32 +++++++++++++++++++ plugins/module_utils/scaleway.py | 8 +++-- plugins/modules/cobbler_sync.py | 9 ++++-- plugins/modules/cobbler_system.py | 9 ++++-- plugins/modules/github_key.py | 10 ++++-- plugins/modules/imc_rest.py | 13 +++++--- plugins/modules/pagerduty.py | 10 ++++-- plugins/modules/pagerduty_change.py | 8 +++-- plugins/modules/scaleway_compute.py | 5 +-- plugins/modules/scaleway_database_backup.py | 7 ++-- plugins/modules/scaleway_lb.py | 5 +-- plugins/modules/statusio_maintenance.py | 12 ++++--- .../plugins/callback/test_loganalytics.py | 14 ++++---- tests/unit/plugins/callback/test_splunk.py | 12 +++---- 19 files changed, 142 insertions(+), 64 deletions(-) create mode 100644 changelogs/fragments/8222-datetime.yml create mode 100644 plugins/module_utils/datetime.py diff --git a/changelogs/fragments/8222-datetime.yml b/changelogs/fragments/8222-datetime.yml new file mode 100644 index 0000000000..00bf862186 --- /dev/null +++ b/changelogs/fragments/8222-datetime.yml @@ -0,0 +1,3 @@ +minor_changes: + - "Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead of offset-naive UTC timestamps, + which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222)." 
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index fbcdc6f89f..ed7e47b2e2 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -59,13 +59,16 @@ import uuid import socket import getpass -from datetime import datetime from os.path import basename from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class AzureLogAnalyticsSource(object): def __init__(self): @@ -93,7 +96,7 @@ class AzureLogAnalyticsSource(object): return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) def __rfc1123date(self): - return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + return now().strftime('%a, %d %b %Y %H:%M:%S GMT') def send_event(self, workspace_id, shared_key, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: @@ -167,7 +170,7 @@ class CallbackModule(CallbackBase): def _seconds_since_start(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -185,10 +188,10 @@ class CallbackModule(CallbackBase): self.loganalytics.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.loganalytics.send_event( diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 144e1f9915..f3725e465a 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -99,7 +99,6 @@ from ansible import context import socket 
import uuid import logging -from datetime import datetime try: import logstash @@ -109,6 +108,10 @@ except ImportError: from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class CallbackModule(CallbackBase): @@ -126,7 +129,7 @@ class CallbackModule(CallbackBase): "pip install python-logstash for Python 2" "pip install python3-logstash for Python 3") - self.start_time = datetime.utcnow() + self.start_time = now() def _init_plugin(self): if not self.disabled: @@ -185,7 +188,7 @@ class CallbackModule(CallbackBase): self.logger.info("ansible start", extra=data) def v2_playbook_on_stats(self, stats): - end_time = datetime.utcnow() + end_time = now() runtime = end_time - self.start_time summarize_stat = {} for host in stats.processed.keys(): diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index d15547f44b..a3e401bc21 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -88,13 +88,16 @@ import uuid import socket import getpass -from datetime import datetime from os.path import basename from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SplunkHTTPCollectorSource(object): def __init__(self): @@ -134,7 +137,7 @@ class SplunkHTTPCollectorSource(object): else: time_format = '%Y-%m-%d %H:%M:%S +0000' - data['timestamp'] = datetime.utcnow().strftime(time_format) + data['timestamp'] = now().strftime(time_format) data['host'] = self.host data['ip_address'] = self.ip_address data['user'] = self.user @@ -181,7 +184,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -220,10 +223,10 @@ class CallbackModule(CallbackBase): 
self.splunk.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.splunk.send_event( diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 46ab3f0f7c..0304b9de52 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -46,13 +46,16 @@ import uuid import socket import getpass -from datetime import datetime from os.path import basename from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SumologicHTTPCollectorSource(object): def __init__(self): @@ -84,8 +87,7 @@ class SumologicHTTPCollectorSource(object): data['uuid'] = result._task._uuid data['session'] = self.session data['status'] = state - data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' - '+0000') + data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000') data['host'] = self.host data['ip_address'] = self.ip_address data['user'] = self.user @@ -123,7 +125,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -144,10 +146,10 @@ class CallbackModule(CallbackBase): self.sumologic.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() 
+ self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.sumologic.send_event( diff --git a/plugins/module_utils/datetime.py b/plugins/module_utils/datetime.py new file mode 100644 index 0000000000..c7899f68da --- /dev/null +++ b/plugins/module_utils/datetime.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Felix Fontein +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime as _datetime +import sys + + +_USE_TIMEZONE = sys.version_info >= (3, 6) + + +def ensure_timezone_info(value): + if not _USE_TIMEZONE or value.tzinfo is not None: + return value + return value.astimezone(_datetime.timezone.utc) + + +def fromtimestamp(value): + if _USE_TIMEZONE: + return _datetime.fromtimestamp(value, tz=_datetime.timezone.utc) + return _datetime.utcfromtimestamp(value) + + +def now(): + if _USE_TIMEZONE: + return _datetime.datetime.now(tz=_datetime.timezone.utc) + return _datetime.datetime.utcnow() diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 67b821103a..1310ba5602 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -17,6 +17,10 @@ from ansible.module_utils.basic import env_fallback, missing_required_lib from ansible.module_utils.urls import fetch_url from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + SCALEWAY_SECRET_IMP_ERR = None try: from passlib.hash import argon2 @@ -306,10 +310,10 @@ class Scaleway(object): # Prevent requesting the resource status too soon time.sleep(wait_sleep_time) - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + 
while now() < end: self.module.debug("We are going to wait for the resource to finish its transition") state = self.fetch_state(resource) diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py index 4ec87c96c7..27f57028be 100644 --- a/plugins/modules/cobbler_sync.py +++ b/plugins/modules/cobbler_sync.py @@ -75,13 +75,16 @@ RETURN = r''' # Default return values ''' -import datetime import ssl from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def main(): module = AnsibleModule( @@ -110,7 +113,7 @@ def main(): changed=True, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -142,7 +145,7 @@ def main(): except Exception as e: module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e))) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py index cecc02f717..a327ede84b 100644 --- a/plugins/modules/cobbler_system.py +++ b/plugins/modules/cobbler_system.py @@ -152,7 +152,6 @@ system: type: dict ''' -import datetime import ssl from ansible.module_utils.basic import AnsibleModule @@ -160,6 +159,10 @@ from ansible.module_utils.six import iteritems from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + IFPROPS_MAPPING = dict( bondingopts='bonding_opts', bridgeopts='bridge_opts', @@ -232,7 +235,7 @@ def main(): changed=False, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -340,7 +343,7 @@ def main(): if 
module._diff: result['diff'] = dict(before=system, after=result['system']) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py index fa3a0a01fa..a74ead9848 100644 --- a/plugins/modules/github_key.py +++ b/plugins/modules/github_key.py @@ -91,12 +91,17 @@ EXAMPLES = ''' pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" ''' +import datetime import json import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + API_BASE = 'https://api.github.com' @@ -151,14 +156,13 @@ def get_all_keys(session): def create_key(session, name, pubkey, check_mode): if check_mode: - from datetime import datetime - now = datetime.utcnow() + now_t = now() return { 'id': 0, 'key': pubkey, 'title': name, 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'created_at': datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), 'read_only': False, 'verified': False } diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index 113d341e89..7f5a5e0814 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -268,7 +268,6 @@ output: errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> ''' -import datetime import os import traceback @@ -292,6 +291,10 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves import zip_longest from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def imc_response(module, rawoutput, rawinput=''): ''' Handle IMC returned data ''' @@ -375,14 
+378,14 @@ def main(): else: module.fail_json(msg='Cannot find/access path:\n%s' % path) - start = datetime.datetime.utcnow() + start = now() # Perform login first url = '%s://%s/nuova' % (protocol, hostname) data = '' % (username, password) resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or auth['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result) result.update(imc_response(module, resp.read())) @@ -415,7 +418,7 @@ def main(): # Perform actual request resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) # Merge results with previous results @@ -431,7 +434,7 @@ def main(): result['changed'] = ('modified' in results) # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.exit_json(**result) finally: logout(module, url, cookie, timeout) diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py index 596c4f4da1..853bd6d797 100644 --- a/plugins/modules/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -151,6 +151,10 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class PagerDutyRequest(object): def __init__(self, module, name, user, token): @@ -206,9 +210,9 @@ class PagerDutyRequest(object): return [{'id': service, 'type': 'service_reference'}] def _compute_start_end_time(self, hours, minutes): - now = datetime.datetime.utcnow() - later = now + 
datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") + now_t = now() + later = now_t + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now_t.strftime("%Y-%m-%dT%H:%M:%SZ") end = later.strftime("%Y-%m-%dT%H:%M:%SZ") return start, end diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py index 1a1e50dcf7..acd31fb447 100644 --- a/plugins/modules/pagerduty_change.py +++ b/plugins/modules/pagerduty_change.py @@ -110,7 +110,10 @@ EXAMPLES = ''' from ansible.module_utils.urls import fetch_url from ansible.module_utils.basic import AnsibleModule -from datetime import datetime + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) def main(): @@ -161,8 +164,7 @@ def main(): if module.params['environment']: custom_details['environment'] = module.params['environment'] - now = datetime.utcnow() - timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + timestamp = now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") payload = { 'summary': module.params['summary'], diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index 7f85bc6686..58a3215056 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -183,6 +183,7 @@ import datetime import time from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import now from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway SCALEWAY_SERVER_STATES = ( @@ -235,9 +236,9 @@ def wait_to_complete_state_transition(compute_api, server, wait=None): wait_timeout = compute_api.module.params["wait_timeout"] wait_sleep_time = compute_api.module.params["wait_sleep_time"] - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() 
< end: + while now() < end: compute_api.module.debug("We are going to wait for the server to finish its transition") if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: compute_api.module.debug("It seems that the server is not in transition anymore.") diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py index 592ec0b7ff..1d0c17fb6d 100644 --- a/plugins/modules/scaleway_database_backup.py +++ b/plugins/modules/scaleway_database_backup.py @@ -170,6 +170,9 @@ import datetime import time from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) from ansible_collections.community.general.plugins.module_utils.scaleway import ( Scaleway, scaleway_argument_spec, @@ -189,9 +192,9 @@ def wait_to_complete_state_transition(module, account_api, backup=None): if backup is None or backup['status'] in stable_states: return backup - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: module.debug('We are going to wait for the backup to finish its transition') response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 3e43a8ae2b..5bd16c3f4e 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -161,6 +161,7 @@ RETURNS = ''' import datetime import time from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import now from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway STABLE_STATES = ( @@ -208,9 +209,9 @@ def wait_to_complete_state_transition(api, lb, force_wait=False): wait_timeout = 
api.module.params["wait_timeout"] wait_sleep_time = api.module.params["wait_sleep_time"] - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: api.module.debug("We are going to wait for the load-balancer to finish its transition") state = fetch_state(api, lb) if state in STABLE_STATES: diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py index e6b34b7098..0a96d0fb41 100644 --- a/plugins/modules/statusio_maintenance.py +++ b/plugins/modules/statusio_maintenance.py @@ -188,6 +188,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import open_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def get_api_auth_headers(api_id, api_key, url, statuspage): @@ -270,11 +274,11 @@ def get_date_time(start_date, start_time, minutes): except (NameError, ValueError): return 1, None, "Couldn't work out a valid date" else: - now = datetime.datetime.utcnow() - delta = now + datetime.timedelta(minutes=minutes) + now_t = now() + delta = now_t + datetime.timedelta(minutes=minutes) # start_date - returned_date.append(now.strftime("%m/%d/%Y")) - returned_date.append(now.strftime("%H:%M")) + returned_date.append(now_t.strftime("%m/%d/%Y")) + returned_date.append(now_t.strftime("%H:%M")) # end_date returned_date.append(delta.strftime("%m/%d/%Y")) returned_date.append(delta.strftime("%H:%M")) diff --git a/tests/unit/plugins/callback/test_loganalytics.py b/tests/unit/plugins/callback/test_loganalytics.py index 17932ed5fa..4d7c2c9db9 100644 --- a/tests/unit/plugins/callback/test_loganalytics.py +++ b/tests/unit/plugins/callback/test_loganalytics.py @@ -9,8 +9,8 @@ from ansible.executor.task_result import TaskResult from ansible_collections.community.general.tests.unit.compat import 
unittest from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock from ansible_collections.community.general.plugins.callback.loganalytics import AzureLogAnalyticsSource -from datetime import datetime +from datetime import datetime import json import sys @@ -32,10 +32,10 @@ class TestAzureLogAnalytics(unittest.TestCase): if sys.version_info < (3, 2): self.assertRegex = self.assertRegexpMatches - @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime') + @patch('ansible_collections.community.general.plugins.callback.loganalytics.now') @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') - def test_overall(self, open_url_mock, mock_datetime): - mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + def test_overall(self, open_url_mock, mock_now): + mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', @@ -52,10 +52,10 @@ class TestAzureLogAnalytics(unittest.TestCase): self.assertEqual(sent_data['event']['uuid'], 'myuuid') self.assertEqual(args[0], 'https://01234567-0123-0123-0123-01234567890a.ods.opinsights.azure.com/api/logs?api-version=2016-04-01') - @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime') + @patch('ansible_collections.community.general.plugins.callback.loganalytics.now') @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') - def test_auth_headers(self, open_url_mock, mock_datetime): - mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + def test_auth_headers(self, open_url_mock, mock_now): + mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) 
self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', diff --git a/tests/unit/plugins/callback/test_splunk.py b/tests/unit/plugins/callback/test_splunk.py index ddcdae24c7..c09540fc00 100644 --- a/tests/unit/plugins/callback/test_splunk.py +++ b/tests/unit/plugins/callback/test_splunk.py @@ -27,10 +27,10 @@ class TestSplunkClient(unittest.TestCase): self.mock_host = Mock('MockHost') self.mock_host.name = 'myhost' - @patch('ansible_collections.community.general.plugins.callback.splunk.datetime') + @patch('ansible_collections.community.general.plugins.callback.splunk.now') @patch('ansible_collections.community.general.plugins.callback.splunk.open_url') - def test_timestamp_with_milliseconds(self, open_url_mock, mock_datetime): - mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + def test_timestamp_with_milliseconds(self, open_url_mock, mock_now): + mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) self.splunk.send_event( @@ -45,10 +45,10 @@ class TestSplunkClient(unittest.TestCase): self.assertEqual(sent_data['event']['host'], 'my-host') self.assertEqual(sent_data['event']['ip_address'], '1.2.3.4') - @patch('ansible_collections.community.general.plugins.callback.splunk.datetime') + @patch('ansible_collections.community.general.plugins.callback.splunk.now') @patch('ansible_collections.community.general.plugins.callback.splunk.open_url') - def test_timestamp_without_milliseconds(self, open_url_mock, mock_datetime): - mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + def test_timestamp_without_milliseconds(self, open_url_mock, mock_now): + mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) self.splunk.send_event( From 7fd37ea247ba351b541d472cbedefc60fb98473f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 20 Apr 
2024 09:39:42 +0200 Subject: [PATCH 036/482] inventory plugins: make wrapping variables as unsafe smarter to avoid triggering an AWX bug (#8225) Make wrapping variables as unsafe smarter to avoid triggering an AWX bug. --- .github/BOTMETA.yml | 5 ++-- changelogs/fragments/8225-unsafe.yml | 2 ++ plugins/inventory/cobbler.py | 3 +- plugins/inventory/gitlab_runners.py | 3 +- plugins/inventory/icinga2.py | 3 +- plugins/inventory/linode.py | 3 +- plugins/inventory/lxd.py | 2 +- plugins/inventory/nmap.py | 3 +- plugins/inventory/online.py | 3 +- plugins/inventory/opennebula.py | 3 +- plugins/inventory/proxmox.py | 2 +- plugins/inventory/scaleway.py | 2 +- plugins/inventory/stackpath_compute.py | 3 +- plugins/inventory/virtualbox.py | 3 +- plugins/inventory/xen_orchestra.py | 2 +- plugins/plugin_utils/unsafe.py | 41 ++++++++++++++++++++++++++ 16 files changed, 68 insertions(+), 15 deletions(-) create mode 100644 changelogs/fragments/8225-unsafe.yml create mode 100644 plugins/plugin_utils/unsafe.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 64cbc7021b..4089e300db 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1445,6 +1445,8 @@ files: ignore: matze labels: zypper maintainers: $team_suse + $plugin_utils/unsafe.py: + maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein $tests/fqdn_valid.py: @@ -1501,7 +1503,6 @@ macros: becomes: plugins/become caches: plugins/cache callbacks: plugins/callback - cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter @@ -1509,7 +1510,7 @@ macros: lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules - terminals: plugins/terminal + plugin_utils: plugins/plugin_utils tests: plugins/test team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross diff --git a/changelogs/fragments/8225-unsafe.yml 
b/changelogs/fragments/8225-unsafe.yml new file mode 100644 index 0000000000..496797ef74 --- /dev/null +++ b/changelogs/fragments/8225-unsafe.yml @@ -0,0 +1,2 @@ +bugfixes: + - "inventory plugins - add unsafe wrapper to avoid marking strings that do not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX ((https://github.com/ansible-collections/community.general/issues/8212, https://github.com/ansible-collections/community.general/pull/8225)." diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 8ca36f4264..cdef9944a0 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -117,7 +117,8 @@ from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_text from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name from ansible.module_utils.six import text_type -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe # xmlrpc try: diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index 536f4bb1b8..bd29e8d310 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -83,7 +83,8 @@ keyed_groups: from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: import gitlab diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 6746bb8e0f..d1f2bc617f 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -102,7 +102,8 @@ from ansible.errors import AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable 
from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable): diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index fc79f12c5f..e161e086e5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -122,7 +122,8 @@ compose: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index c803f47ddc..cf64f4ee8c 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -175,7 +175,7 @@ from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: import ipaddress diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 3a28007a31..2ca474a1ff 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -126,7 +126,8 @@ from ansible.errors import AnsibleParserError from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from 
ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index b3a9ecd379..9355d9d414 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -68,7 +68,8 @@ from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.six.moves.urllib.parse import urljoin -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin): diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 3babfa2324..b097307c39 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -97,7 +97,8 @@ except ImportError: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.common.text.converters import to_native -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from collections import namedtuple import os diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index ed55ef1b6a..774833c488 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -226,9 +226,9 @@ from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.utils.display import Display -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from ansible_collections.community.general.plugins.module_utils.version import 
LooseVersion +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe # 3rd party imports try: diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 601129f566..dc24a17dab 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -121,10 +121,10 @@ else: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.six import raise_from -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe import ansible.module_utils.six.moves.urllib.parse as urllib_parse diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 9a556d39e0..6b48a49f12 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -72,7 +72,8 @@ from ansible.plugins.inventory import ( Cacheable ) from ansible.utils.display import Display -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe display = Display() diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 8604808e15..79b04ec722 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -62,7 +62,8 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native, to_ from ansible.module_utils.common._collections_compat import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path -from 
ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 96dd997701..4094af2468 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -82,9 +82,9 @@ from time import sleep from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe # 3rd party imports try: diff --git a/plugins/plugin_utils/unsafe.py b/plugins/plugin_utils/unsafe.py new file mode 100644 index 0000000000..1eb61bea0f --- /dev/null +++ b/plugins/plugin_utils/unsafe.py @@ -0,0 +1,41 @@ +# Copyright (c) 2023, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.module_utils.six import binary_type, text_type +from ansible.module_utils.common._collections_compat import Mapping, Set +from ansible.module_utils.common.collections import is_sequence +from ansible.utils.unsafe_proxy import ( + AnsibleUnsafe, + wrap_var as _make_unsafe, +) + +_RE_TEMPLATE_CHARS = re.compile(u'[{}]') +_RE_TEMPLATE_CHARS_BYTES = re.compile(b'[{}]') + + +def make_unsafe(value): + if value is None or isinstance(value, AnsibleUnsafe): + return value + + if isinstance(value, Mapping): + return dict((make_unsafe(key), make_unsafe(val)) for key, val in value.items()) + elif isinstance(value, Set): + return 
set(make_unsafe(elt) for elt in value) + elif is_sequence(value): + return type(value)(make_unsafe(elt) for elt in value) + elif isinstance(value, binary_type): + if _RE_TEMPLATE_CHARS_BYTES.search(value): + value = _make_unsafe(value) + return value + elif isinstance(value, text_type): + if _RE_TEMPLATE_CHARS.search(value): + value = _make_unsafe(value) + return value + + return value From 865de5baa06a61883a1a7ef09b10ee997fe1943d Mon Sep 17 00:00:00 2001 From: Kai Date: Sat, 20 Apr 2024 12:12:45 +0200 Subject: [PATCH 037/482] bitwarden - add support for filtering by organization_id (#8188) * bitwarden - add support for filtering by organization_id * Update changelogs/fragments/8188-bitwarden-add-organization_id.yml Co-authored-by: Felix Fontein * implement PR discussion result on wording * rewrite search_field filtering To correctly handle organization_id and collection_id by passing both to bw. Tests needed to be extended to filter organizations / collections and the testdata needed changes to reflect that a collection always belongs to a single organizaion --------- Co-authored-by: Felix Fontein --- .../8188-bitwarden-add-organization_id.yml | 2 + plugins/lookup/bitwarden.py | 40 +++++---- tests/unit/plugins/lookup/test_bitwarden.py | 86 ++++++++++++++++--- 3 files changed, 100 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/8188-bitwarden-add-organization_id.yml diff --git a/changelogs/fragments/8188-bitwarden-add-organization_id.yml b/changelogs/fragments/8188-bitwarden-add-organization_id.yml new file mode 100644 index 0000000000..c57ba3a479 --- /dev/null +++ b/changelogs/fragments/8188-bitwarden-add-organization_id.yml @@ -0,0 +1,2 @@ +minor_changes: +- bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188). 
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 2cb2d19a18..7584cd98a6 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -29,7 +29,7 @@ DOCUMENTATION = """ - Field to retrieve, for example V(name) or V(id). - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. - - When O(collection_id) is set, this field can be undefined to retrieve the whole collection records. + - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. type: str default: name version_added: 5.7.0 @@ -40,6 +40,10 @@ DOCUMENTATION = """ description: Collection ID to filter results by collection. Leave unset to skip filtering. type: str version_added: 6.3.0 + organization_id: + description: Organization ID to filter results by organization. Leave unset to skip filtering. + type: str + version_added: 8.5.0 bw_session: description: Pass session key instead of reading from env. type: str @@ -142,45 +146,44 @@ class Bitwarden(object): raise BitwardenException(err) return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') - def _get_matches(self, search_value, search_field, collection_id=None): + def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None): """Return matching records whose search_field is equal to key. 
""" # Prepare set of params for Bitwarden CLI - if search_value: - if search_field == 'id': - params = ['get', 'item', search_value] - else: - params = ['list', 'items', '--search', search_value] - if collection_id: - params.extend(['--collectionid', collection_id]) + if search_field == 'id': + params = ['get', 'item', search_value] else: - if not collection_id: - raise AnsibleError("search_value is required if collection_id is not set.") + params = ['list', 'items'] + if search_value: + params.extend(['--search', search_value]) - params = ['list', 'items', '--collectionid', collection_id] + if collection_id: + params.extend(['--collectionid', collection_id]) + if organization_id: + params.extend(['--organizationid', organization_id]) out, err = self._run(params) # This includes things that matched in different fields. initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] - if search_field == 'id' or not search_value: + if search_field == 'id': if initial_matches is None: initial_matches = [] else: initial_matches = [initial_matches] # Filter to only include results from the right field. - return [item for item in initial_matches if item[search_field] == search_value] + return [item for item in initial_matches if not search_value or item[search_field] == search_value] - def get_field(self, field, search_value=None, search_field="name", collection_id=None): + def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): """Return a list of the specified field for records whose search_field match search_value and filtered by collection if collection has been provided. If field is None, return the whole record for each match. 
""" - matches = self._get_matches(search_value, search_field, collection_id) + matches = self._get_matches(search_value, search_field, collection_id, organization_id) if not field: return matches field_matches = [] @@ -215,15 +218,16 @@ class LookupModule(LookupBase): field = self.get_option('field') search_field = self.get_option('search') collection_id = self.get_option('collection_id') + organization_id = self.get_option('organization_id') _bitwarden.session = self.get_option('bw_session') if not _bitwarden.unlocked: raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") if not terms: - return [_bitwarden.get_field(field, None, search_field, collection_id)] + terms = [None] - return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms] + return [_bitwarden.get_field(field, term, search_field, collection_id, organization_id) for term in terms] _bitwarden = Bitwarden() diff --git a/tests/unit/plugins/lookup/test_bitwarden.py b/tests/unit/plugins/lookup/test_bitwarden.py index 9270dd44e1..04cad8d6c8 100644 --- a/tests/unit/plugins/lookup/test_bitwarden.py +++ b/tests/unit/plugins/lookup/test_bitwarden.py @@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import re from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch @@ -13,8 +14,10 @@ from ansible.errors import AnsibleError from ansible.module_utils import six from ansible.plugins.loader import lookup_loader from ansible_collections.community.general.plugins.lookup.bitwarden import Bitwarden +from ansible.parsing.ajson import AnsibleJSONEncoder MOCK_COLLECTION_ID = "3b12a9da-7c49-40b8-ad33-aede017a7ead" +MOCK_ORGANIZATION_ID = "292ba0c6-f289-11ee-9301-ef7b639ccd2a" MOCK_RECORDS = [ { @@ -48,7 +51,7 @@ MOCK_RECORDS = [ "name": "a_test", "notes": None, "object": "item", - "organizationId": None, + "organizationId": 
MOCK_ORGANIZATION_ID, "passwordHistory": [ { "lastUsedDate": "2022-07-26T23:03:23.405Z", @@ -68,9 +71,7 @@ MOCK_RECORDS = [ "type": 1 }, { - "collectionIds": [ - MOCK_COLLECTION_ID - ], + "collectionIds": [], "deletedDate": None, "favorite": False, "folderId": None, @@ -106,10 +107,30 @@ MOCK_RECORDS = [ "name": "dupe_name", "notes": None, "object": "item", - "organizationId": None, + "organizationId": MOCK_ORGANIZATION_ID, "reprompt": 0, "revisionDate": "2022-07-27T03:42:46.673Z", "type": 1 + }, + { + "collectionIds": [], + "deletedDate": None, + "favorite": False, + "folderId": None, + "id": "2bf517be-fb13-11ee-be89-a345aa369a94", + "login": { + "password": "e", + "passwordRevisionDate": None, + "totp": None, + "username": "f" + }, + "name": "non_collection_org_record", + "notes": None, + "object": "item", + "organizationId": MOCK_ORGANIZATION_ID, + "reprompt": 0, + "revisionDate": "2024-14-15T11:30:00.000Z", + "type": 1 } ] @@ -118,11 +139,41 @@ class MockBitwarden(Bitwarden): unlocked = True - def _get_matches(self, search_value=None, search_field="name", collection_id=None): - if not search_value and collection_id: - return list(filter(lambda record: collection_id in record['collectionIds'], MOCK_RECORDS)) + def _run(self, args, stdin=None, expected_rc=0): + if args[0] == 'get': + if args[1] == 'item': + for item in MOCK_RECORDS: + if item.get('id') == args[2]: + return AnsibleJSONEncoder().encode(item), '' + if args[0] == 'list': + if args[1] == 'items': + try: + search_value = args[args.index('--search') + 1] + except ValueError: + search_value = None - return list(filter(lambda record: record[search_field] == search_value, MOCK_RECORDS)) + try: + collection_to_filter = args[args.index('--collectionid') + 1] + except ValueError: + collection_to_filter = None + + try: + organization_to_filter = args[args.index('--organizationid') + 1] + except ValueError: + organization_to_filter = None + + items = [] + for item in MOCK_RECORDS: + if search_value and not 
re.search(search_value, item.get('name')): + continue + if collection_to_filter and collection_to_filter not in item.get('collectionIds', []): + continue + if organization_to_filter and item.get('organizationId') != organization_to_filter: + continue + items.append(item) + return AnsibleJSONEncoder().encode(items), '' + + return '[]', '' class LoggedOutMockBitwarden(MockBitwarden): @@ -194,4 +245,19 @@ class TestLookupModule(unittest.TestCase): @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) def test_bitwarden_plugin_full_collection(self): # Try to retrieve the full records of the given collection. - self.assertEqual(MOCK_RECORDS, self.lookup.run(None, collection_id=MOCK_COLLECTION_ID)[0]) + self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, collection_id=MOCK_COLLECTION_ID)[0]) + + @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + def test_bitwarden_plugin_full_organization(self): + self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2], MOCK_RECORDS[3]], + self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID)[0]) + + @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + def test_bitwarden_plugin_filter_organization(self): + self.assertEqual([MOCK_RECORDS[2]], + self.lookup.run(['dupe_name'], organization_id=MOCK_ORGANIZATION_ID)[0]) + + @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + def test_bitwarden_plugin_full_collection_organization(self): + self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, + collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID)[0]) From be4d5b7dc4d63d3a828fd480f9728ce012310133 Mon Sep 17 00:00:00 2001 From: Jakob Lund Date: Sat, 20 Apr 2024 12:12:55 +0200 Subject: [PATCH 038/482] ini_file - add feature 'section_has_values' (#7505) * insert new code * 
add changelog * add argument_spec * sanity check * docstring version_added * version-added-must-be-major-or-minor * Update plugins/modules/ini_file.py Co-authored-by: Felix Fontein * check for default value `None` * typo in example * add integration test and rename option * add license * update "version added" in docstring * insert new code * remove whitespace * update examples * support exclusive, allow_no_value, multiple values in section_has_values * prefer Todd's variable naming in loops * resolve number clash in file names * pass sanity test validate-modules * Documentation updates --------- Co-authored-by: Felix Fontein Co-authored-by: Todd Lewis --- .../fragments/7505-ini_file-section_has.yml | 5 + plugins/modules/ini_file.py | 133 ++++++- .../targets/ini_file/tasks/main.yml | 4 +- .../ini_file/tasks/tests/08-section.yml | 341 ++++++++++++++++++ 4 files changed, 476 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/7505-ini_file-section_has.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/08-section.yml diff --git a/changelogs/fragments/7505-ini_file-section_has.yml b/changelogs/fragments/7505-ini_file-section_has.yml new file mode 100644 index 0000000000..0424764fd0 --- /dev/null +++ b/changelogs/fragments/7505-ini_file-section_has.yml @@ -0,0 +1,5 @@ +minor_changes: + - "ini_file - add an optional parameter ``section_has_values``. If the + target ini file contains more than one ``section``, use ``section_has_values`` + to specify which one should be updated + (https://github.com/ansible-collections/community.general/pull/7505)." diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index ec71a94731..affee2a4f7 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -44,6 +44,30 @@ options: - If being omitted, the O(option) will be placed before the first O(section). - Omitting O(section) is also required if the config format does not support sections. 
type: str + section_has_values: + type: list + elements: dict + required: false + suboptions: + option: + type: str + description: Matching O(section) must contain this option. + required: true + value: + type: str + description: Matching O(section_has_values[].option) must have this specific value. + values: + description: + - The string value to be associated with an O(section_has_values[].option). + - Mutually exclusive with O(section_has_values[].value). + - O(section_has_values[].value=v) is equivalent to O(section_has_values[].values=[v]). + type: list + elements: str + description: + - Among possibly multiple sections of the same name, select the first one that contains matching options and values. + - With O(state=present), if a suitable section is not found, a new section will be added, including the required options. + - With O(state=absent), at most one O(section) is removed if it contains the values. + version_added: 8.6.0 option: description: - If set (required for changing a O(value)), this is the name of the option. 
@@ -182,6 +206,57 @@ EXAMPLES = r''' option: beverage value: lemon juice state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.11/32 + mode: '0600' + state: absent + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.11/32 + mode: '0600' + state: absent + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present ''' import io @@ -222,7 +297,19 @@ def update_section_line(option, changed, section_lines, index, changed_lines, ig return (changed, msg) -def do_ini(module, filename, section=None, option=None, values=None, +def check_section_has_values(section_has_values, section_lines): + if section_has_values is not None: + for condition in section_has_values: + for line in section_lines: + match = match_opt(condition["option"], line) + if match and (len(condition["values"]) == 0 or match.group(7) in condition["values"]): + break + else: + return False + return True + + +def do_ini(module, filename, section=None, section_has_values=None, option=None, values=None, state='present', 
exclusive=True, backup=False, no_extra_spaces=False, ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False): @@ -307,14 +394,22 @@ def do_ini(module, filename, section=None, option=None, values=None, section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip()))) for index, line in enumerate(ini_lines): + # end of section: + if within_section and line.startswith(u'['): + if check_section_has_values( + section_has_values, ini_lines[section_start:index] + ): + section_end = index + break + else: + # look for another section + within_section = False + section_start = section_end = 0 + # find start and end of section if section_pattern.match(line): within_section = True section_start = index - elif line.startswith(u'['): - if within_section: - section_end = index - break before = ini_lines[0:section_start] section_lines = ini_lines[section_start:section_end] @@ -435,6 +530,18 @@ def do_ini(module, filename, section=None, option=None, values=None, if not within_section and state == 'present': ini_lines.append(u'[%s]\n' % section) msg = 'section and option added' + if section_has_values: + for condition in section_has_values: + if condition['option'] != option: + if len(condition['values']) > 0: + for value in condition['values']: + ini_lines.append(assignment_format % (condition['option'], value)) + elif allow_no_value: + ini_lines.append(u'%s\n' % condition['option']) + elif not exclusive: + for value in condition['values']: + if value not in values: + values.append(value) if option and values: for value in values: ini_lines.append(assignment_format % (option, value)) @@ -476,6 +583,11 @@ def main(): argument_spec=dict( path=dict(type='path', required=True, aliases=['dest']), section=dict(type='str'), + section_has_values=dict(type='list', elements='dict', options=dict( + option=dict(type='str', required=True), + value=dict(type='str'), + values=dict(type='list', elements='str') + ), default=None, 
mutually_exclusive=[['value', 'values']]), option=dict(type='str'), value=dict(type='str'), values=dict(type='list', elements='str'), @@ -498,6 +610,7 @@ def main(): path = module.params['path'] section = module.params['section'] + section_has_values = module.params['section_has_values'] option = module.params['option'] value = module.params['value'] values = module.params['values'] @@ -519,8 +632,16 @@ def main(): elif values is None: values = [] + if section_has_values: + for condition in section_has_values: + if condition['value'] is not None: + condition['values'] = [condition['value']] + elif condition['values'] is None: + condition['values'] = [] +# raise Exception("section_has_values: {}".format(section_has_values)) + (changed, backup_file, diff, msg) = do_ini( - module, path, section, option, values, state, exclusive, backup, + module, path, section, section_has_values, option, values, state, exclusive, backup, no_extra_spaces, ignore_spaces, create, allow_no_value, modify_inactive_option, follow) if not module.check_mode and os.path.exists(path): diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 0ed3c28172..8fd88074b2 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -16,7 +16,6 @@ - name: include tasks block: - - name: include tasks to perform basic tests include_tasks: tests/00-basic.yml @@ -50,3 +49,6 @@ - name: include tasks to test optional spaces in section headings include_tasks: tests/07-section_name_spaces.yml + + - name: include tasks to test section_has_values + include_tasks: tests/08-section.yml diff --git a/tests/integration/targets/ini_file/tasks/tests/08-section.yml b/tests/integration/targets/ini_file/tasks/tests/08-section.yml new file mode 100644 index 0000000000..4f3a135e11 --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/08-section.yml @@ -0,0 +1,341 @@ +--- +# Copyright (c) 
Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +## testing section selection + +- name: test-section 1 - Create starting ini file + copy: + content: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + + dest: "{{ output_file }}" + +- name: test-section 1 - Modify starting ini file + ini_file: + dest: "{{ output_file }}" + section: drinks + option: car + value: volvo + state: present + register: result1 + +- name: test-section 1 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 1 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + car = volvo + + [drinks] + fav = lemonade + beverage = pineapple juice + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 1 - Option was added to first section + assert: + that: + - result1 is changed + - result1.msg == 'option added' + - output1 == expected1 + +# ---------------- + +- name: test-section 2 - Create starting ini file + copy: + content: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + + dest: "{{ output_file }}" + +- name: test-section 2 - Modify starting ini file + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: beverage + value: pineapple juice + option: car + value: volvo + state: present + register: result1 + +- name: test-section 2 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 2 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + car = volvo + output1: "{{ output_content.content | b64decode }}" + +- name: 
test-section 2 - Option added to second section specified with section_has_values + assert: + that: + - result1 is changed + - result1.msg == 'option added' + - output1 == expected1 + +# ---------------- + +- name: test-section 3 - Create starting ini file + copy: + content: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + + dest: "{{ output_file }}" + +- name: test-section 3 - Modify starting ini file + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: beverage + value: pineapple juice + option: fav + value: lemonade + state: absent + register: result1 + +- name: test-section 3 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 3 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + beverage = pineapple juice + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 3 - Option was removed from specified section + assert: + that: + - result1 is changed + - result1.msg == 'option changed' + - output1 == expected1 + +# ---------------- + +- name: test-section 4 - Create starting ini file + copy: + content: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + + dest: "{{ output_file }}" + +- name: test-section 4 - Modify starting ini file + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: beverage + value: alligator slime + option: fav + value: tea + state: present + register: result1 + +- name: test-section 4 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 4 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + fav = lemonade + beverage = pineapple juice + [drinks] + beverage = alligator 
slime + fav = tea + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 4 - New section created, including required values + assert: + that: + - result1 is changed + - result1.msg == 'section and option added' + - output1 == expected1 + +# ---------------- + +- name: test-section 5 - Modify test-section 4 result file + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: fav + value: lemonade + - option: beverage + value: pineapple juice + state: absent + register: result1 + +- name: test-section 5 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 5 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + beverage = alligator slime + fav = tea + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 5 - Section removed as specified + assert: + that: + - result1 is changed + - result1.msg == 'section removed' + - output1 == expected1 + +# ---------------- + +- name: test-section 6 - Modify test-section 5 result file with multiple values + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: fav + values: + - cherry + - lemon + - vanilla + - option: beverage + value: pineapple juice + state: present + option: fav + values: + - vanilla + - grape + exclusive: false + register: result1 + +- name: test-section 6 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 6 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + beverage = alligator slime + fav = tea + [drinks] + beverage = pineapple juice + fav = vanilla + fav = grape + fav = cherry + fav = lemon + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 6 - New section added + assert: + that: + - result1 is changed + - result1.msg == 
'section and option added' + - output1 == expected1 + +# ---------------- + +- name: test-section 7 - Modify test-section 6 result file with exclusive value + ini_file: + dest: "{{ output_file }}" + section: drinks + section_has_values: + - option: fav + value: vanilla + state: present + option: fav + value: cherry + exclusive: true + register: result1 + +- name: test-section 7 - Read modified file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-section 7 - Create expected result + set_fact: + expected1: | + [drinks] + fav = lemonade + beverage = orange juice + + [drinks] + beverage = alligator slime + fav = tea + [drinks] + beverage = pineapple juice + fav = cherry + output1: "{{ output_content.content | b64decode }}" + +- name: test-section 7 - Option changed + assert: + that: + - result1 is changed + - result1.msg == 'option changed' + - output1 == expected1 From 486c26b2247c37428c41a094ab7db593f3355734 Mon Sep 17 00:00:00 2001 From: Stijn Tintel Date: Sat, 20 Apr 2024 13:14:15 +0300 Subject: [PATCH 039/482] filesystem: add bcachefs support (#8126) Signed-off-by: Stijn Tintel --- .../8126-filesystem-bcachefs-support.yaml | 2 + plugins/modules/filesystem.py | 50 +++++++++++++++++-- .../targets/filesystem/defaults/main.yml | 1 + .../targets/filesystem/tasks/main.yml | 6 ++- .../targets/filesystem/tasks/setup.yml | 10 ++++ 5 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8126-filesystem-bcachefs-support.yaml diff --git a/changelogs/fragments/8126-filesystem-bcachefs-support.yaml b/changelogs/fragments/8126-filesystem-bcachefs-support.yaml new file mode 100644 index 0000000000..32ff5c64da --- /dev/null +++ b/changelogs/fragments/8126-filesystem-bcachefs-support.yaml @@ -0,0 +1,2 @@ +minor_changes: + - filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126). 
diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py index ec361245bd..73e8c79c6a 100644 --- a/plugins/modules/filesystem.py +++ b/plugins/modules/filesystem.py @@ -40,11 +40,12 @@ options: default: present version_added: 1.3.0 fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] + choices: [ bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] description: - Filesystem type to be created. This option is required with O(state=present) (or if O(state) is omitted). - ufs support has been added in community.general 3.4.0. + - bcachefs support has been added in community.general 8.6.0. type: str aliases: [type] dev: @@ -67,7 +68,7 @@ options: resizefs: description: - If V(true), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. + - Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. Attempts to resize other filesystem types will fail. - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, so resizing of XFS is @@ -86,7 +87,7 @@ options: - The UUID options specified in O(opts) take precedence over this value. - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values. - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set. - - Supported for O(fstype) being one of C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs). + - Supported for O(fstype) being one of C(bcachefs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs). - This is B(not idempotent). Specifying this option will always result in a change. - Mutually exclusive with O(resizefs). 
type: str @@ -405,6 +406,48 @@ class Reiserfs(Filesystem): MKFS_FORCE_FLAGS = ['-q'] +class Bcachefs(Filesystem): + MKFS = 'mkfs.bcachefs' + MKFS_FORCE_FLAGS = ['--force'] + MKFS_SET_UUID_OPTIONS = ['-U', '--uuid'] + INFO = 'bcachefs' + GROW = 'bcachefs' + GROW_MAX_SPACE_FLAGS = ['device', 'resize'] + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer).""" + dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), + 'show-super', str(dev)], check_rc=True) + + for line in stdout.splitlines(): + if "Size: " in line: + parts = line.split() + unit = parts[2] + + base = None + exp = None + + units_2 = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"] + units_10 = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"] + + try: + exp = units_2.index(unit) + base = 1024 + except ValueError: + exp = units_10.index(unit) + base = 1000 + + if exp == 0: + value = int(parts[1]) + else: + value = float(parts[1]) + + if base is not None and exp is not None: + return int(value * pow(base, exp)) + + raise ValueError(repr(stdout)) + + class Btrfs(Filesystem): MKFS = 'mkfs.btrfs' INFO = 'btrfs' @@ -567,6 +610,7 @@ class UFS(Filesystem): FILESYSTEMS = { + 'bcachefs': Bcachefs, 'ext2': Ext2, 'ext3': Ext3, 'ext4': Ext4, diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index ec446d2417..7ff30bcd54 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -15,6 +15,7 @@ tested_filesystems: # - 1.7.0 requires at least 30Mo # - 1.10.0 requires at least 38Mo # - resizefs asserts when initial fs is smaller than 60Mo and seems to require 1.10.0 + bcachefs: {fssize: 20, grow: true, new_uuid: null} ext4: {fssize: 10, grow: true, new_uuid: 'random'} ext4dev: {fssize: 10, grow: true, new_uuid: 'random'} ext3: {fssize: 10, grow: true, new_uuid: 'random'} diff --git 
a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 0c15c21556..51361079ce 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -36,7 +36,7 @@ # Not available: btrfs, lvm, f2fs, ocfs2 # All BSD systems use swap fs, but only Linux needs mkswap # Supported: ext2/3/4 (e2fsprogs), xfs (xfsprogs), reiserfs (progsreiserfs), vfat - - 'not (ansible_system == "FreeBSD" and item.0.key in ["btrfs", "f2fs", "swap", "lvm", "ocfs2"])' + - 'not (ansible_system == "FreeBSD" and item.0.key in ["bcachefs", "btrfs", "f2fs", "swap", "lvm", "ocfs2"])' # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])' @@ -46,6 +46,10 @@ # Other limitations and corner cases + # bcachefs only on Alpine > 3.18 and Arch Linux for now + # other distributions have too old versions of bcachefs-tools and/or util-linux (blkid for UUID tests) + - 'ansible_distribution == "Alpine" and ansible_distribution_version is version("3.18", ">") and item.0.key == "bcachefs"' + - 'ansible_distribution == "Archlinux" and item.0.key == "bcachefs"' # f2fs-tools and reiserfs-utils packages not available with RHEL/CentOS on CI - 'not (ansible_distribution in ["CentOS", "RedHat"] and item.0.key in ["f2fs", "reiserfs"])' - 'not (ansible_os_family == "RedHat" and ansible_distribution_major_version is version("8", ">=") and diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 97dafaeeec..77c028acaf 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -16,6 +16,16 @@ - e2fsprogs - xfsprogs +- name: "Install bcachefs tools" + ansible.builtin.package: + name: bcachefs-tools + state: present + when: + # bcachefs only on 
Alpine > 3.18 and Arch Linux for now + # other distributions have too old versions of bcachefs-tools and/or util-linux (blkid for UUID tests) + - ansible_distribution == "Alpine" and ansible_distribution_version is version("3.18", ">") + - ansible_distribution == "Archlinux" + - name: "Install btrfs progs" ansible.builtin.package: name: btrfs-progs From f55342d8afc0c14e9ebf16d2e7bc13d1d7e31fd5 Mon Sep 17 00:00:00 2001 From: Eike Waldt Date: Sun, 21 Apr 2024 00:25:10 +0200 Subject: [PATCH 040/482] keycloak_client: add sorted defaultClientScopes and optionalClientScopes to normalizations (#8223) keycloak_client: add sorted defaultClientScopes and optionalClientScopes to normalizations Signed-off-by: Eike Waldt --- .../8223-keycloak_client-additional-normalizations.yaml | 2 ++ plugins/modules/keycloak_client.py | 6 ++++++ 2 files changed, 8 insertions(+) create mode 100644 changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml diff --git a/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml b/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml new file mode 100644 index 0000000000..47f7e6bd7b --- /dev/null +++ b/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` to normalizations (https://github.com/ansible-collections/community.general/pull/8223). 
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index 0766608b45..cd9c60bacf 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -744,6 +744,12 @@ def normalise_cr(clientrep, remove_ids=False): if 'attributes' in clientrep: clientrep['attributes'] = list(sorted(clientrep['attributes'])) + if 'defaultClientScopes' in clientrep: + clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes'])) + + if 'optionalClientScopes' in clientrep: + clientrep['optionalClientScopes'] = list(sorted(clientrep['optionalClientScopes'])) + if 'redirectUris' in clientrep: clientrep['redirectUris'] = list(sorted(clientrep['redirectUris'])) From 9d66a1dc1e54c242c899fe1dc6c1ceab7871a894 Mon Sep 17 00:00:00 2001 From: Eike Waldt Date: Sun, 21 Apr 2024 00:25:57 +0200 Subject: [PATCH 041/482] keycloak_realm: add normalizations for enabledEventTypes, and supportedLocales (#8224) keycloak_realm: add nomalizations for enabledEventTypes, and supportedLocales Signed-off-by: Eike Waldt --- ...224-keycloak_realm-add-normalizations.yaml | 2 ++ plugins/modules/keycloak_realm.py | 29 +++++++++++++++++-- 2 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml diff --git a/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml b/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml new file mode 100644 index 0000000000..0574141f61 --- /dev/null +++ b/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` (https://github.com/ansible-collections/community.general/pull/8224). 
diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 9f2e72b525..6128c9e4c7 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -582,6 +582,27 @@ from ansible_collections.community.general.plugins.module_utils.identity.keycloa from ansible.module_utils.basic import AnsibleModule +def normalise_cr(realmrep): + """ Re-sorts any properties where the order is important so that diff's is minimised and the change detection is more effective. + + :param realmrep: the realmrep dict to be sanitized + :return: normalised realmrep dict + """ + # Avoid the dict passed in to be modified + realmrep = realmrep.copy() + + if 'enabledEventTypes' in realmrep: + realmrep['enabledEventTypes'] = list(sorted(realmrep['enabledEventTypes'])) + + if 'otpSupportedApplications' in realmrep: + realmrep['otpSupportedApplications'] = list(sorted(realmrep['otpSupportedApplications'])) + + if 'supportedLocales' in realmrep: + realmrep['supportedLocales'] = list(sorted(realmrep['supportedLocales'])) + + return realmrep + + def sanitize_cr(realmrep): """ Removes probably sensitive details from a realm representation. 
@@ -595,7 +616,7 @@ def sanitize_cr(realmrep): if 'saml.signing.private.key' in result['attributes']: result['attributes'] = result['attributes'].copy() result['attributes']['saml.signing.private.key'] = '********' - return result + return normalise_cr(result) def main(): @@ -777,9 +798,11 @@ def main(): result['changed'] = True if module.check_mode: # We can only compare the current realm with the proposed updates we have + before_norm = normalise_cr(before_realm) + desired_norm = normalise_cr(desired_realm) if module._diff: - result['diff'] = dict(before=before_realm_sanitized, - after=sanitize_cr(desired_realm)) + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) result['changed'] = (before_realm != desired_realm) module.exit_json(**result) From be11d0d4091f87fb0cc0b2ecb947c0c037cf1f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Lang=C3=A9?= Date: Sun, 21 Apr 2024 14:54:45 +0200 Subject: [PATCH 042/482] Add an explicit `select` option to `portage` module (#8236) * Add an explicit `select` option to `portage` module This is a fix for #6226 * Apply suggestions from code review Co-authored-by: Felix Fontein * Default `select` option to None, making it more retrocompatible * Add changelog fragment for the PR * Update changelogs/fragments/8236-portage-select-feature.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8236-portage-select-feature.yml | 2 ++ plugins/modules/portage.py | 11 +++++++++++ 2 files changed, 13 insertions(+) create mode 100644 changelogs/fragments/8236-portage-select-feature.yml diff --git a/changelogs/fragments/8236-portage-select-feature.yml b/changelogs/fragments/8236-portage-select-feature.yml new file mode 100644 index 0000000000..742d5cc966 --- /dev/null +++ b/changelogs/fragments/8236-portage-select-feature.yml @@ -0,0 +1,2 @@ +minor_changes: + - portage - adds the possibility to explicitely tell portage to write packages to world file 
(https://github.com/ansible-collections/community.general/issues/6226, https://github.com/ansible-collections/community.general/pull/8236). diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py index 112f6d2d7c..8ae8efb087 100644 --- a/plugins/modules/portage.py +++ b/plugins/modules/portage.py @@ -121,6 +121,14 @@ options: type: bool default: false + select: + description: + - If set to V(true), explicitely add the package to the world file. + - Please note that this option is not used for idempotency, it is only used + when actually installing a package. + type: bool + version_added: 8.6.0 + sync: description: - Sync package repositories first @@ -374,6 +382,7 @@ def emerge_packages(module, packages): 'loadavg': '--load-average', 'backtrack': '--backtrack', 'withbdeps': '--with-bdeps', + 'select': '--select', } for flag, arg in emerge_flags.items(): @@ -523,6 +532,7 @@ def main(): nodeps=dict(default=False, type='bool'), onlydeps=dict(default=False, type='bool'), depclean=dict(default=False, type='bool'), + select=dict(default=None, type='bool'), quiet=dict(default=False, type='bool'), verbose=dict(default=False, type='bool'), sync=dict(default=None, choices=['yes', 'web', 'no']), @@ -543,6 +553,7 @@ def main(): ['quiet', 'verbose'], ['quietbuild', 'verbose'], ['quietfail', 'verbose'], + ['oneshot', 'select'], ], supports_check_mode=True, ) From a05a5982a66e1c2cc37046ef97c637c782d1fcdd Mon Sep 17 00:00:00 2001 From: Matt Adams Date: Sun, 21 Apr 2024 07:55:27 -0500 Subject: [PATCH 043/482] bitwarden_secrets_manager: implement rate limit retry with backoff (#8238) * bitwarden_secrets_manager: implement rate limit retry with backoff (#8230) * bitwarden_secrets_manager: add changelog fragment for 90cd2d61 (#8238) * bitwarden_secrets_manager: clarify "Too many requests" is an error condition (#8238) * bitwarden_secrets_manager: avoid an extra _run_with_retry execution after the last (very long) delay * bitwarden_secrets_manager: changelog fragment key 
and reference issue url --- ...-manager-rate-limit-retry-with-backoff.yml | 2 ++ plugins/lookup/bitwarden_secrets_manager.py | 21 ++++++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml diff --git a/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml b/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml new file mode 100644 index 0000000000..b9d80a7cba --- /dev/null +++ b/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml @@ -0,0 +1,2 @@ +bugfixes: + - "bitwarden_secrets_manager lookup plugin - implements retry with exponential backoff to avoid lookup errors when Bitwardn's API rate limiting is encountered (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238)." diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py index 2d6706bee1..8cabc693ff 100644 --- a/plugins/lookup/bitwarden_secrets_manager.py +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -70,6 +70,7 @@ RETURN = """ """ from subprocess import Popen, PIPE +from time import sleep from ansible.errors import AnsibleLookupError from ansible.module_utils.common.text.converters import to_text @@ -84,11 +85,29 @@ class BitwardenSecretsManagerException(AnsibleLookupError): class BitwardenSecretsManager(object): def __init__(self, path='bws'): self._cli_path = path + self._max_retries = 3 + self._retry_delay = 1 @property def cli_path(self): return self._cli_path + def _run_with_retry(self, args, stdin=None, retries=0): + out, err, rc = self._run(args, stdin) + + if rc != 0: + if retries >= self._max_retries: + raise BitwardenSecretsManagerException("Max retries exceeded. 
Unable to retrieve secret.") + + if "Too many requests" in err: + delay = self._retry_delay * (2 ** retries) + sleep(delay) + return self._run_with_retry(args, stdin, retries + 1) + else: + raise BitwardenSecretsManagerException(f"Command failed with return code {rc}: {err}") + + return out, err, rc + def _run(self, args, stdin=None): p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) out, err = p.communicate(stdin) @@ -107,7 +126,7 @@ class BitwardenSecretsManager(object): 'get', 'secret', secret_id ] - out, err, rc = self._run(params) + out, err, rc = self._run_with_retry(params) if rc != 0: raise BitwardenSecretsManagerException(to_text(err)) From 8f98ba91190bc7ac36e44f29ac776db835f3a5ab Mon Sep 17 00:00:00 2001 From: Denis Borisov Date: Sun, 21 Apr 2024 15:56:01 +0300 Subject: [PATCH 044/482] java_cert: add cert_content argument (#8153) * add cert_content arg (#8034) * add changelog fragment (#8034) * Update plugins/modules/java_cert.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8153-java_cert-add-cert_content-arg.yml | 2 + plugins/modules/java_cert.py | 37 ++++++++++++++++--- 2 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8153-java_cert-add-cert_content-arg.yml diff --git a/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml b/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml new file mode 100644 index 0000000000..40ae1f84a4 --- /dev/null +++ b/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153). diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py index 72302b12c1..e2d04b71e2 100644 --- a/plugins/modules/java_cert.py +++ b/plugins/modules/java_cert.py @@ -28,7 +28,7 @@ options: cert_url: description: - Basic URL to fetch SSL certificate from. 
- - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: str cert_port: description: @@ -39,8 +39,14 @@ options: cert_path: description: - Local path to load certificate from. - - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: path + cert_content: + description: + - Content of the certificate used to create the keystore. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: str + version_added: 8.6.0 cert_alias: description: - Imported certificate alias. @@ -55,10 +61,10 @@ options: pkcs12_path: description: - Local path to load PKCS12 keystore from. - - Unlike O(cert_url) and O(cert_path), the PKCS12 keystore embeds the private key matching + - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching the certificate, and is used to import both the certificate and its private key into the java keystore. - - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: path pkcs12_password: description: @@ -149,6 +155,19 @@ EXAMPLES = r''' cert_alias: LE_RootCA trust_cacert: true +- name: Import trusted CA from the SSL certificate stored in the cert_content variable + community.general.java_cert: + cert_content: | + -----BEGIN CERTIFICATE----- + ... 
+ -----END CERTIFICATE----- + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + cert_alias: LE_RootCA + trust_cacert: true + - name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist community.general.java_cert: cert_url: google.com @@ -487,6 +506,7 @@ def main(): argument_spec = dict( cert_url=dict(type='str'), cert_path=dict(type='path'), + cert_content=dict(type='str'), pkcs12_path=dict(type='path'), pkcs12_password=dict(type='str', no_log=True), pkcs12_alias=dict(type='str'), @@ -503,11 +523,11 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, - required_if=[['state', 'present', ('cert_path', 'cert_url', 'pkcs12_path'), True], + required_if=[['state', 'present', ('cert_path', 'cert_url', 'cert_content', 'pkcs12_path'), True], ['state', 'absent', ('cert_url', 'cert_alias'), True]], required_together=[['keystore_path', 'keystore_pass']], mutually_exclusive=[ - ['cert_url', 'cert_path', 'pkcs12_path'] + ['cert_url', 'cert_path', 'cert_content', 'pkcs12_path'] ], supports_check_mode=True, add_file_common_args=True, @@ -515,6 +535,7 @@ def main(): url = module.params.get('cert_url') path = module.params.get('cert_path') + content = module.params.get('cert_content') port = module.params.get('cert_port') pkcs12_path = module.params.get('pkcs12_path') @@ -582,6 +603,10 @@ def main(): # certificate to stdout so we don't need to do any transformations. new_certificate = path + elif content: + with open(new_certificate, "w") as f: + f.write(content) + elif url: # Getting the X509 digest from a URL is the same as from a path, we just have # to download the cert first From 073565631972cef59e808ab8cb4349acffe6199c Mon Sep 17 00:00:00 2001 From: "Kenneth Benzie (Benie)" Date: Sun, 21 Apr 2024 19:09:54 +0100 Subject: [PATCH 045/482] Add support for state=latest to flatpak module (#8221) Fixes #6563 by extending the allowed values of the `state` parameter to include `latest`. 
To do this, the `update_flat()` function is introduced which borrows the majority of its implementation from both the existing `install_flat()` and `remove_flat()` functions. The documentation and examples have been expanded describing what to expect when using `state=latest`. --- plugins/modules/flatpak.py | 83 +++++++-- .../targets/flatpak/tasks/check_mode.yml | 98 +++++++++++ .../targets/flatpak/tasks/test.yml | 159 ++++++++++++++++++ 3 files changed, 328 insertions(+), 12 deletions(-) diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index 80dbabdfa0..15e404d45b 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -26,7 +26,9 @@ extends_documentation_fragment: - community.general.attributes attributes: check_mode: - support: full + support: partial + details: + - If O(state=latest), the module will always return C(changed=true). diff_mode: support: none options: @@ -53,12 +55,12 @@ options: - Both C(https://) and C(http://) URLs are supported. - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - - When used with O(state=absent), it is recommended to specify the name in the reverse DNS - format. - - When supplying a URL with O(state=absent), the module will try to match the - installed flatpak based on the name of the flatpakref to remove it. However, there is no - guarantee that the names of the flatpakref file and the reverse DNS name of the installed - flatpak do match. + - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in + the reverse DNS format. + - When supplying a URL with O(state=absent) or O(state=latest), the module will try to match the + installed flatpak based on the name of the flatpakref to remove or update it. 
However, there + is no guarantee that the names of the flatpakref file and the reverse DNS name of the + installed flatpak do match. type: list elements: str required: true @@ -82,7 +84,8 @@ options: state: description: - Indicates the desired package state. - choices: [ absent, present ] + - The value V(latest) is supported since community.general 8.6.0. + choices: [ absent, present, latest ] type: str default: present ''' @@ -118,6 +121,37 @@ EXAMPLES = r''' - org.inkscape.Inkscape - org.mozilla.firefox +- name: Update the spotify flatpak + community.general.flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: latest + +- name: Update the gedit flatpak package without dependencies (not recommended) + community.general.flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: latest + no_dependencies: true + +- name: Update the gedit package from flathub for current user + community.general.flatpak: + name: org.gnome.gedit + state: latest + method: user + +- name: Update the Gnome Calendar flatpak from the gnome remote system-wide + community.general.flatpak: + name: org.gnome.Calendar + state: latest + remote: gnome + +- name: Update multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: latest + - name: Remove the gedit flatpak community.general.flatpak: name: org.gnome.gedit @@ -195,6 +229,28 @@ def install_flat(module, binary, remote, names, method, no_dependencies): result['changed'] = True +def update_flat(module, binary, names, method, no_dependencies): + """Update existing flatpaks.""" + global result # pylint: disable=global-variable-not-assigned + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in names + ] + command = [binary, "update", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if LooseVersion(flatpak_version) < 
LooseVersion('1.1.3'): + command += ["-y"] + else: + command += ["--noninteractive"] + if no_dependencies: + command += ["--no-deps"] + command += installed_flat_names + stdout = _flatpak_command(module, module.check_mode, command) + result["changed"] = ( + True if module.check_mode else stdout.find("Nothing to do.") == -1 + ) + + def uninstall_flat(module, binary, names, method): """Remove existing flatpaks.""" global result # pylint: disable=global-variable-not-assigned @@ -313,7 +369,7 @@ def main(): method=dict(type='str', default='system', choices=['user', 'system']), state=dict(type='str', default='present', - choices=['absent', 'present']), + choices=['absent', 'present', 'latest']), no_dependencies=dict(type='bool', default=False), executable=dict(type='path', default='flatpak') ), @@ -338,10 +394,13 @@ def main(): module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) installed, not_installed = flatpak_exists(module, binary, name, method) - if state == 'present' and not_installed: - install_flat(module, binary, remote, not_installed, method, no_dependencies) - elif state == 'absent' and installed: + if state == 'absent' and installed: uninstall_flat(module, binary, installed, method) + else: + if state == 'latest' and installed: + update_flat(module, binary, installed, method, no_dependencies) + if state in ('present', 'latest') and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) module.exit_json(**result) diff --git a/tests/integration/targets/flatpak/tasks/check_mode.yml b/tests/integration/targets/flatpak/tasks/check_mode.yml index 9f52dc1229..b4538200ff 100644 --- a/tests/integration/targets/flatpak/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak/tasks/check_mode.yml @@ -52,6 +52,38 @@ - removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" +# state=latest on absent flatpak + +- name: Test state=latest of 
absent flatpak (check mode) + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: latest + register: latest_result + check_mode: true + +- name: Verify state=latest of absent flatpak test result (check mode) + assert: + that: + - latest_result is changed + msg: "state=latest an absent flatpak shall mark module execution as changed" + +- name: Test non-existent idempotency of state=latest of absent flatpak (check mode) + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: latest + register: double_latest_result + check_mode: true + +- name: Verify non-existent idempotency of state=latest of absent flatpak test result (check mode) + assert: + that: + - double_latest_result is changed + msg: | + state=latest an absent flatpak a second time shall still mark module execution + as changed in check mode + # state=present with url on absent flatpak - name: Test addition of absent flatpak with url (check mode) @@ -101,6 +133,40 @@ - url_removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" +# state=latest with url on absent flatpak + +- name: Test state=latest of absent flatpak with url (check mode) + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote + state: latest + register: url_latest_result + check_mode: true + +- name: Verify state=latest of absent flatpak with url test result (check mode) + assert: + that: + - url_latest_result is changed + msg: "state=latest an absent flatpak from URL shall mark module execution as changed" + +- name: Test non-existent idempotency of state=latest of absent flatpak with url (check mode) + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote + state: latest + register: double_url_latest_result + check_mode: true + +- name: > + Verify non-existent idempotency of additionof state=latest flatpak with url test + result (check mode) + assert: + that: + - double_url_latest_result is 
changed + msg: | + state=latest an absent flatpak from URL a second time shall still mark module execution + as changed in check mode + # - Tests with present flatpak ------------------------------------------------- # state=present on present flatpak @@ -149,6 +215,22 @@ Removing a present flatpak a second time shall still mark module execution as changed in check mode +# state=latest on present flatpak + +- name: Test state=latest of present flatpak (check mode) + flatpak: + name: com.dummy.App2 + remote: dummy-remote + state: latest + register: latest_present_result + check_mode: true + +- name: Verify latest test result of present flatpak (check mode) + assert: + that: + - latest_present_result is changed + msg: "state=latest an present flatpak shall mark module execution as changed" + # state=present with url on present flatpak - name: Test addition with url of present flatpak (check mode) @@ -195,3 +277,19 @@ that: - double_url_removal_present_result is changed msg: Removing an absent flatpak a second time shall still mark module execution as changed + +# state=latest with url on present flatpak + +- name: Test state=latest with url of present flatpak (check mode) + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: latest + register: url_latest_present_result + check_mode: true + +- name: Verify state=latest with url of present flatpak test result (check mode) + assert: + that: + - url_latest_present_result is changed + msg: "state=latest a present flatpak from URL shall mark module execution as changed" diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 29c4efbe95..658f7b1168 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -65,6 +65,45 @@ - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" +# state=latest + +- 
name: Test state=latest - {{ method }} + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: latest_result + +- name: Verify state=latest test result - {{ method }} + assert: + that: + - latest_result is changed + msg: "state=latest shall add flatpak when absent" + +- name: Test idempotency of state=latest - {{ method }} + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: double_latest_result + +- name: Verify idempotency of state=latest test result - {{ method }} + assert: + that: + - double_latest_result is not changed + msg: "state=latest shall not do anything when flatpak is already present" + +- name: Cleanup after state=present test - {{ method }} + flatpak: + name: com.dummy.App1 + state: absent + method: "{{ method }}" + no_dependencies: true + # state=present with url as name - name: Test addition with url - {{ method }} @@ -152,6 +191,45 @@ method: "{{ method }}" no_dependencies: true +# state=latest with url as name + +- name: Test state=latest with url - {{ method }} + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: url_latest_result + +- name: Verify state=latest test result - {{ method }} + assert: + that: + - url_latest_result is changed + msg: "state=present with url as name shall add flatpak when absent" + +- name: Test idempotency of state=latest with url - {{ method }} + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: double_url_latest_result + +- name: Verify idempotency of state=latest with url test result - {{ method }} + assert: + that: + - double_url_latest_result is not changed + msg: "state=present with url as name shall not do anything 
when flatpak is already present" + +- name: Cleanup after state=present with url test - {{ method }} + flatpak: + name: com.dummy.App1 + state: absent + method: "{{ method }}" + no_dependencies: true + # state=present with list of packages - name: Test addition with list - {{ method }} @@ -287,3 +365,84 @@ that: - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" + +# state=latest with list of packages + +- name: Test state=latest with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: latest_result + +- name: Verify state=latest with list test result - {{ method }} + assert: + that: + - latest_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of state=latest with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: double_latest_result + +- name: Verify idempotency of state=latest with list test result - {{ method }} + assert: + that: + - double_latest_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +- name: Test state=latest with list partially installed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: latest_result + +- name: Verify state=latest with list partially installed test result - {{ method }} + assert: + that: + - latest_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of state=latest with list partially installed - {{ method }} + flatpak: + name: + - 
com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: latest + method: "{{ method }}" + no_dependencies: true + register: double_latest_result + +- name: Verify idempotency of state=latest with list partially installed test result - {{ method }} + assert: + that: + - double_latest_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +- name: Cleanup after state=present with list test - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + - com.dummy.App3 + state: absent + method: "{{ method }}" From a5697da29c43d55a03fa12b9d904c302ed60d373 Mon Sep 17 00:00:00 2001 From: desand01 Date: Sun, 21 Apr 2024 14:10:03 -0400 Subject: [PATCH 046/482] Keycloak client role scope (#8252) * first commit * minor update * fixe Copyright * fixe sanity * Update plugins/modules/keycloak_client_rolescope.py Co-authored-by: Felix Fontein * fixe sanity 2 * Update plugins/modules/keycloak_client_rolescope.py Co-authored-by: Felix Fontein --------- Co-authored-by: Andre Desrosiers Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../identity/keycloak/keycloak.py | 102 ++++++ plugins/modules/keycloak_client_rolescope.py | 280 ++++++++++++++++ .../keycloak_client_rolescope/README.md | 20 ++ .../targets/keycloak_client_rolescope/aliases | 5 + .../keycloak_client_rolescope/tasks/main.yml | 317 ++++++++++++++++++ .../keycloak_client_rolescope/vars/main.yml | 26 ++ 7 files changed, 752 insertions(+) create mode 100644 plugins/modules/keycloak_client_rolescope.py create mode 100644 tests/integration/targets/keycloak_client_rolescope/README.md create mode 100644 tests/integration/targets/keycloak_client_rolescope/aliases create mode 100644 tests/integration/targets/keycloak_client_rolescope/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_client_rolescope/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 
4089e300db..e21d0c81c1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -780,6 +780,8 @@ files: maintainers: laurpaum $modules/keycloak_component_info.py: maintainers: desand01 + $modules/keycloak_client_rolescope.py: + maintainers: desand01 $modules/keycloak_user_rolemapping.py: maintainers: bratwurzt $modules/keycloak_realm_rolemapping.py: diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 9e1c3f4d93..b2a1892503 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -28,6 +28,9 @@ URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" +URL_CLIENT_ROLE_SCOPE_CLIENTS = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/clients/{scopeid}" +URL_CLIENT_ROLE_SCOPE_REALM = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/realm" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" @@ -3049,6 +3052,105 @@ class KeycloakAPI(object): except Exception: return False + def get_client_role_scope_from_client(self, clientid, clientscopeid, realm="master"): + """ Fetch the roles associated with the client's scope for a specific client on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated roles. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the scope. + :return: The client scope of roles from specified client. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Update and fetch the roles associated with the client's scope on the Keycloak server. + :param payload: List of roles to be added to the scope. + :param clientid: ID of the client to update scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. + :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, + data=json.dumps(payload), validate_certs=self.validate_certs) + + except Exception as e: + self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def delete_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Delete the roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of roles to be deleted. + :param clientid: ID of the client to delete roles from scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. 
+ :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, + data=json.dumps(payload), validate_certs=self.validate_certs) + + except Exception as e: + self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def get_client_role_scope_from_realm(self, clientid, realm="master"): + """ Fetch the realm roles from the client's scope on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated realm roles. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Update and fetch the realm roles from the client's scope on the Keycloak server. + :param payload: List of realm roles to add. + :param clientid: ID of the client to update scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, + data=json.dumps(payload), validate_certs=self.validate_certs) + + except Exception as e: + self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + + def delete_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Delete the realm roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of realm roles to delete. + :param clientid: ID of the client to delete roles from scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, + data=json.dumps(payload), validate_certs=self.validate_certs) + + except Exception as e: + self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + def fail_open_url(self, e, msg, **kwargs): try: if isinstance(e, HTTPError): diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py new file mode 100644 index 0000000000..cca72f0ddd --- /dev/null +++ b/plugins/modules/keycloak_client_rolescope.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_client_rolescope + +short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications. + +version_added: 8.6.0 + +description: + - This module allows you to add or remove Keycloak roles from clients scope via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the role mapping. + - On V(present), all roles in O(role_names) will be mapped if not exists yet. + - On V(absent), all roles mapping in O(role_names) will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which clients resides. + default: 'master' + + client_id: + type: str + required: true + description: + - Roles provided in O(role_names) while be added to this client scope. + + client_scope_id: + type: str + description: + - If the O(role_names) are client role, the client ID under which it resides. 
+ - If this parameter is absent, the roles are considered a realm role. + role_names: + required: true + type: list + elements: str + description: + - Names of roles to manipulate. + - If O(client_scope_id) is present, all roles must be under this client. + - If O(client_scope_id) is absent, all roles must be under the realm. + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Andre Desrosiers (@desand01) +''' + +EXAMPLES = ''' +- name: Add roles to public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + client_scope_id: backend-client-private + role_names: + - backend-role-admin + - backend-role-user + +- name: Remove roles from public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + client_scope_id: backend-client-private + role_names: + - backend-role-admin + state: absent + +- name: Add realm roles to public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + role_names: + - realm-role-admin + - realm-role-user +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client role scope for frontend-client-public has been updated" + +end_state: + description: Representation of role role scope after module execution. 
+ returned: on success + type: list + elements: dict + sample: [ + { + "clientRole": false, + "composite": false, + "containerId": "MyCustomRealm", + "id": "47293104-59a6-46f0-b460-2e9e3c9c424c", + "name": "backend-role-admin" + }, + { + "clientRole": false, + "composite": false, + "containerId": "MyCustomRealm", + "id": "39c62a6d-542c-4715-92d2-41021eb33967", + "name": "backend-role-user" + } + ] +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + client_id=dict(type='str', required=True), + client_scope_id=dict(type='str'), + realm=dict(type='str', default='master'), + role_names=dict(type='list', elements='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, msg='', diff={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + client_scope_id = module.params.get('client_scope_id') + role_names = module.params.get('role_names') + state = module.params.get('state') + + objRealm = kc.get_realm_by_id(realm) + if not objRealm: + module.fail_json(msg="Failed to retrive realm '{realm}'".format(realm=realm)) + + objClient = kc.get_client_by_clientid(clientid, realm) + if not objClient: + module.fail_json(msg="Failed to retrive client '{realm}.{clientid}'".format(realm=realm, clientid=clientid)) + if objClient["fullScopeAllowed"] 
and state == "present": + module.fail_json(msg="FullScopeAllowed is active for Client '{realm}.{clientid}'".format(realm=realm, clientid=clientid)) + + if client_scope_id: + objClientScope = kc.get_client_by_clientid(client_scope_id, realm) + if not objClientScope: + module.fail_json(msg="Failed to retrive client '{realm}.{client_scope_id}'".format(realm=realm, client_scope_id=client_scope_id)) + before_role_mapping = kc.get_client_role_scope_from_client(objClient["id"], objClientScope["id"], realm) + else: + before_role_mapping = kc.get_client_role_scope_from_realm(objClient["id"], realm) + + if client_scope_id: + # retrive all role from client_scope + client_scope_roles_by_name = kc.get_client_roles_by_id(objClientScope["id"], realm) + else: + # retrive all role from realm + client_scope_roles_by_name = kc.get_realm_roles(realm) + + # convert to indexed Dict by name + client_scope_roles_by_name = {role["name"]: role for role in client_scope_roles_by_name} + role_mapping_by_name = {role["name"]: role for role in before_role_mapping} + role_mapping_to_manipulate = [] + + if state == "present": + # update desired + for role_name in role_names: + if role_name not in client_scope_roles_by_name: + if client_scope_id: + module.fail_json(msg="Failed to retrive role '{realm}.{client_scope_id}.{role_name}'" + .format(realm=realm, client_scope_id=client_scope_id, role_name=role_name)) + else: + module.fail_json(msg="Failed to retrive role '{realm}.{role_name}'".format(realm=realm, role_name=role_name)) + if role_name not in role_mapping_by_name: + role_mapping_to_manipulate.append(client_scope_roles_by_name[role_name]) + role_mapping_by_name[role_name] = client_scope_roles_by_name[role_name] + else: + # remove role if present + for role_name in role_names: + if role_name in role_mapping_by_name: + role_mapping_to_manipulate.append(role_mapping_by_name[role_name]) + del role_mapping_by_name[role_name] + + before_role_mapping = sorted(before_role_mapping, key=lambda d: 
d['name']) + desired_role_mapping = sorted(role_mapping_by_name.values(), key=lambda d: d['name']) + + result['changed'] = len(role_mapping_to_manipulate) > 0 + + if result['changed']: + result['diff'] = dict(before=before_role_mapping, after=desired_role_mapping) + + if not result['changed']: + # no changes + result['end_state'] = before_role_mapping + result['msg'] = "No changes required for client role scope {name}.".format(name=clientid) + elif state == "present": + # doing update + if module.check_mode: + result['end_state'] = desired_role_mapping + elif client_scope_id: + result['end_state'] = kc.update_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm) + else: + result['end_state'] = kc.update_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm) + result['msg'] = "Client role scope for {name} has been updated".format(name=clientid) + else: + # doing delete + if module.check_mode: + result['end_state'] = desired_role_mapping + elif client_scope_id: + result['end_state'] = kc.delete_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm) + else: + result['end_state'] = kc.delete_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm) + result['msg'] = "Client role scope for {name} has been deleted".format(name=clientid) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/keycloak_client_rolescope/README.md b/tests/integration/targets/keycloak_client_rolescope/README.md new file mode 100644 index 0000000000..cd1152dad8 --- /dev/null +++ b/tests/integration/targets/keycloak_client_rolescope/README.md @@ -0,0 +1,20 @@ + +# Running keycloak_client_rolescope module integration test + +To run Keycloak component info module's integration test, start a keycloak server using Docker: + + docker run -d --rm --name mykeycloak -p 8080:8080 -e KEYCLOAK_ADMIN=admin -e 
KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:latest start-dev --http-relative-path /auth + +Run integration tests: + + ansible-test integration -v keycloak_client_rolescope --allow-unsupported --docker fedora35 --docker-network host + +Cleanup: + + docker stop mykeycloak + + diff --git a/tests/integration/targets/keycloak_client_rolescope/aliases b/tests/integration/targets/keycloak_client_rolescope/aliases new file mode 100644 index 0000000000..bd1f024441 --- /dev/null +++ b/tests/integration/targets/keycloak_client_rolescope/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +unsupported diff --git a/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml new file mode 100644 index 0000000000..8675c9548d --- /dev/null +++ b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml @@ -0,0 +1,317 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Wait for Keycloak + uri: + url: "{{ url }}/admin/" + status_code: 200 + validate_certs: no + register: result + until: result.status == 200 + retries: 10 + delay: 10 + +- name: Delete realm if exists + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + state: absent + +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create a 
Keycloak realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + name: "{{ item }}" + realm: "{{ realm }}" + with_items: + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" + +- name: Client private + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_private }}" + state: present + redirect_uris: + - "https://my-backend-api.c.org/" + fullScopeAllowed: True + attributes: '{{client_attributes1}}' + public_client: False + +- name: Create a Keycloak client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + name: "{{ item }}" + realm: "{{ realm }}" + client_id: "{{ client_name_private }}" + with_items: + - "{{ client_role_admin }}" + - "{{ client_role_user }}" + +- name: Client public + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + redirect_uris: + - "https://my-onepage-app-frontend.c.org/" + attributes: '{{client_attributes1}}' + full_scope_allowed: False + public_client: True + + +- name: Map roles to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + client_scope_id: "{{ client_name_private }}" + role_names: + - "{{ client_role_admin }}" + - "{{ client_role_user }}" + register: result + +- name: Assert mapping 
created + assert: + that: + - result is changed + - result.end_state | length == 2 + +- name: remap role user to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + client_scope_id: "{{ client_name_private }}" + role_names: + - "{{ client_role_user }}" + register: result + +- name: Assert mapping created + assert: + that: + - result is not changed + - result.end_state | length == 2 + +- name: Remove Map role admin to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + client_scope_id: "{{ client_name_private }}" + role_names: + - "{{ client_role_admin }}" + state: absent + register: result + +- name: Assert mapping deleted + assert: + that: + - result is changed + - result.end_state | length == 1 + - result.end_state[0].name == client_role_user + +- name: Map missing roles to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + client_scope_id: "{{ client_name_private }}" + role_names: + - "{{ client_role_admin }}" + - "{{ client_role_not_exists }}" + ignore_errors: true + register: result + +- name: Assert failed mapping missing role + assert: + that: + - result is failed + +- name: Map roles duplicate + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: 
"{{ client_name_public }}" + client_scope_id: "{{ client_name_private }}" + role_names: + - "{{ client_role_admin }}" + - "{{ client_role_admin }}" + register: result + +- name: Assert result + assert: + that: + - result is changed + - result.end_state | length == 2 + +- name: Map roles to private client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_private }}" + role_names: + - "{{ realm_role_admin }}" + ignore_errors: true + register: result + +- name: Assert failed mapping role to full scope client + assert: + that: + - result is failed + +- name: Map realm role to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + role_names: + - "{{ realm_role_admin }}" + register: result + +- name: Assert result + assert: + that: + - result is changed + - result.end_state | length == 1 + +- name: Map two realm roles to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + role_names: + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" + register: result + +- name: Assert result + assert: + that: + - result is changed + - result.end_state | length == 2 + +- name: Unmap all realm roles to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + 
role_names: + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" + state: absent + register: result + +- name: Assert result + assert: + that: + - result is changed + - result.end_state | length == 0 + +- name: Map missing realm role to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + role_names: + - "{{ realm_role_not_exists }}" + ignore_errors: true + register: result + +- name: Assert failed mapping missing realm role + assert: + that: + - result is failed + +- name: Check-mode try to Map realm roles to public client + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + role_names: + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" + check_mode: true + register: result + +- name: Assert result + assert: + that: + - result is changed + - result.end_state | length == 2 + +- name: Check-mode step two, check if change where applied + community.general.keycloak_client_rolescope: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_name_public }}" + role_names: [] + register: result + +- name: Assert result + assert: + that: + - result is not changed + - result.end_state | length == 0 \ No newline at end of file diff --git a/tests/integration/targets/keycloak_client_rolescope/vars/main.yml b/tests/integration/targets/keycloak_client_rolescope/vars/main.yml new file mode 100644 index 0000000000..8bd59398b7 --- /dev/null +++ b/tests/integration/targets/keycloak_client_rolescope/vars/main.yml @@ -0,0 +1,26 @@ +--- 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm + + +client_name_private: backend-client-private +client_role_admin: client-role-admin +client_role_user: client-role-user +client_role_not_exists: client-role-missing + +client_name_public: frontend-client-public + + +realm_role_admin: realm-role-admin +realm_role_user: realm-role-user +realm_role_not_exists: client-role-missing + + +client_attributes1: {"backchannel.logout.session.required": true, "backchannel.logout.revoke.offline.tokens": false, "client.secret.creation.time": 0} From af1c5dd7853df3f99c7ee2dfec13df00e7c2dc74 Mon Sep 17 00:00:00 2001 From: Maxopoly Date: Sun, 21 Apr 2024 20:10:59 +0200 Subject: [PATCH 047/482] Add accept-new as valid option for ssh_config host key checking (#8257) * Add accept-new as valid option for host key checking * Add changelog fragment for #8257 * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8257-ssh-config-hostkey-support-accept-new.yaml | 2 ++ plugins/modules/ssh_config.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml diff --git a/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml b/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml new file mode 100644 index 0000000000..ca1d61aefd --- /dev/null +++ b/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml @@ -0,0 +1,2 @@ +minor_changes: + - ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` (https://github.com/ansible-collections/community.general/pull/8257). 
diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py index e89e087b39..d974f45373 100644 --- a/plugins/modules/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -88,7 +88,8 @@ options: strict_host_key_checking: description: - Whether to strictly check the host key when doing connections to the remote host. - choices: [ 'yes', 'no', 'ask' ] + - The value V(accept-new) is supported since community.general 8.6.0. + choices: [ 'yes', 'no', 'ask', 'accept-new' ] type: str proxycommand: description: @@ -370,7 +371,7 @@ def main(): strict_host_key_checking=dict( type='str', default=None, - choices=['yes', 'no', 'ask'] + choices=['yes', 'no', 'ask', 'accept-new'], ), controlmaster=dict(type='str', default=None, choices=['yes', 'no', 'ask', 'auto', 'autoask']), controlpath=dict(type='str', default=None), From 211688ef1b1a157ad6ef2464b3294506fcebdc51 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Apr 2024 21:07:21 +0200 Subject: [PATCH 048/482] apt_rpm: add new states 'latest' and 'present_not_latest' (#8247) * Add new states 'latest' and 'present_not_latest'. * Improve documentation. --- changelogs/fragments/8247-apt_rpm-latest.yml | 6 ++++ plugins/modules/apt_rpm.py | 31 +++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/8247-apt_rpm-latest.yml diff --git a/changelogs/fragments/8247-apt_rpm-latest.yml b/changelogs/fragments/8247-apt_rpm-latest.yml new file mode 100644 index 0000000000..d62fb40340 --- /dev/null +++ b/changelogs/fragments/8247-apt_rpm-latest.yml @@ -0,0 +1,6 @@ +minor_changes: + - "apt_rpm - add new states ``latest`` and ``present_not_latest``. The value ``latest`` is equivalent to the current behavior of + ``present``, which will upgrade a package if a newer version exists. ``present_not_latest`` does what most users would expect ``present`` + to do: it does not upgrade if the package is already installed. 
The current behavior of ``present`` will be deprecated in a later version, + and eventually changed to that of ``present_not_latest`` + (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247)." diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index de1b574114..3f90365bb3 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -37,7 +37,17 @@ options: state: description: - Indicates the desired package state. - choices: [ absent, present, installed, removed ] + - Please note that V(present) and V(installed) are equivalent to V(latest) right now. + This will change in the future. To simply ensure that a package is installed, without upgrading + it, use the V(present_not_latest) state. + - The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0. + choices: + - absent + - present + - present_not_latest + - installed + - removed + - latest default: present type: str update_cache: @@ -180,7 +190,7 @@ def check_package_version(module, name): return False -def query_package_provides(module, name): +def query_package_provides(module, name, allow_upgrade=False): # rpm -q returns 0 if the package is installed, # 1 if it is not installed if name.endswith('.rpm'): @@ -195,10 +205,11 @@ def query_package_provides(module, name): rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name)) if rc == 0: + if not allow_upgrade: + return True if check_package_version(module, name): return True - else: - return False + return False def update_package_db(module): @@ -255,14 +266,14 @@ def remove_packages(module, packages): return (False, "package(s) already absent") -def install_packages(module, pkgspec): +def install_packages(module, pkgspec, allow_upgrade=False): if pkgspec is None: return (False, "Empty package list") packages = "" for package in pkgspec: - if not query_package_provides(module, package): + if not 
query_package_provides(module, package, allow_upgrade=allow_upgrade): packages += "'%s' " % package if len(packages) != 0: @@ -271,7 +282,7 @@ def install_packages(module, pkgspec): installed = True for packages in pkgspec: - if not query_package_provides(module, package): + if not query_package_provides(module, package, allow_upgrade=False): installed = False # apt-rpm always have 0 for exit code if --force is used @@ -286,7 +297,7 @@ def install_packages(module, pkgspec): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), + state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']), update_cache=dict(type='bool', default=False), clean=dict(type='bool', default=False), dist_upgrade=dict(type='bool', default=False), @@ -320,8 +331,8 @@ def main(): output += out packages = p['package'] - if p['state'] in ['installed', 'present']: - (m, out) = install_packages(module, packages) + if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: + (m, out) = install_packages(module, packages, allow_upgrade=p['state'] != 'present_not_latest') modified = modified or m output += out From 17e11d7d7e1ddd6bf3e73536a464aea08fd20084 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 22 Apr 2024 06:42:04 +0200 Subject: [PATCH 049/482] apt_rpm: fix package install check (#8263) Fix package install check. 
--- changelogs/fragments/8263-apt_rpm-install-check.yml | 2 ++ plugins/modules/apt_rpm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8263-apt_rpm-install-check.yml diff --git a/changelogs/fragments/8263-apt_rpm-install-check.yml b/changelogs/fragments/8263-apt_rpm-install-check.yml new file mode 100644 index 0000000000..ae44616e79 --- /dev/null +++ b/changelogs/fragments/8263-apt_rpm-install-check.yml @@ -0,0 +1,2 @@ +bugfixes: + - "apt_rpm - when checking whether packages were installed after running ``apt-get -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263)." diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index 3f90365bb3..03b87e78f0 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -281,7 +281,7 @@ def install_packages(module, pkgspec, allow_upgrade=False): rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"}) installed = True - for packages in pkgspec: + for package in pkgspec: if not query_package_provides(module, package, allow_upgrade=False): installed = False From be3b66c8b559cb9e1a5c8e20e2c8226d493bf8da Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 22 Apr 2024 18:28:22 +0200 Subject: [PATCH 050/482] [9.0.0] Remove deprecated modules and features (#8198) * Remove deprecated modules. * Update BOTMETA. * Update ignore.txt files. * Bump collection version to 9.0.0. * Change timeout from 10 to 60. * Remove the alias autosubscribe of auto_attach. * Change default of mode from compatibility to new. * Remove deprecated classes. * Remove mh.mixins.deps.DependencyMixin. * Remove flowdock module. * Remove proxmox_default_behavior option. * Remove ack_* options. * Remove deprecated command support. * Change virtualenv behavior. * Fix changelog. * Remove imports of deprecated (and now removed) code. * Fix tests. * Fix sanity tests. 
* Require Django 4.1. * Use V() instead of C() for values. Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * django_manage: improve docs for release 9.0.0 * markup * fix doc notes in cpanm --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Alexei Znamensky --- .github/BOTMETA.yml | 46 - changelogs/fragments/remove_deprecated.yml | 18 + galaxy.yml | 2 +- meta/runtime.yml | 140 +-- plugins/doc_fragments/rackspace.py | 120 --- plugins/module_utils/mh/mixins/deps.py | 41 - plugins/module_utils/mh/module_helper.py | 3 +- plugins/module_utils/module_helper.py | 2 +- plugins/module_utils/rax.py | 334 ------- plugins/module_utils/redhat.py | 240 ----- plugins/modules/ansible_galaxy_install.py | 22 - plugins/modules/cpanm.py | 25 +- plugins/modules/django_manage.py | 92 +- plugins/modules/flowdock.py | 211 ---- plugins/modules/proxmox.py | 44 +- plugins/modules/rax.py | 903 ------------------ plugins/modules/rax_cbs.py | 235 ----- plugins/modules/rax_cbs_attachments.py | 226 ----- plugins/modules/rax_cdb.py | 266 ------ plugins/modules/rax_cdb_database.py | 179 ---- plugins/modules/rax_cdb_user.py | 227 ----- plugins/modules/rax_clb.py | 320 ------- plugins/modules/rax_clb_nodes.py | 291 ------ plugins/modules/rax_clb_ssl.py | 289 ------ plugins/modules/rax_dns.py | 180 ---- plugins/modules/rax_dns_record.py | 358 ------- plugins/modules/rax_facts.py | 152 --- plugins/modules/rax_files.py | 400 -------- plugins/modules/rax_files_objects.py | 556 ----------- plugins/modules/rax_identity.py | 110 --- plugins/modules/rax_keypair.py | 179 ---- plugins/modules/rax_meta.py | 182 ---- plugins/modules/rax_mon_alarm.py | 235 ----- plugins/modules/rax_mon_check.py | 329 ------- plugins/modules/rax_mon_entity.py | 201 ---- plugins/modules/rax_mon_notification.py | 182 ---- plugins/modules/rax_mon_notification_plan.py | 191 ---- plugins/modules/rax_network.py | 146 --- plugins/modules/rax_queue.py | 147 --- 
plugins/modules/rax_scaling_group.py | 441 --------- plugins/modules/rax_scaling_policy.py | 294 ------ plugins/modules/redfish_command.py | 17 +- plugins/modules/redfish_config.py | 17 +- plugins/modules/redfish_info.py | 17 +- plugins/modules/redhat_subscription.py | 15 +- plugins/modules/stackdriver.py | 228 ----- plugins/modules/webfaction_app.py | 213 ----- plugins/modules/webfaction_db.py | 209 ---- plugins/modules/webfaction_domain.py | 184 ---- plugins/modules/webfaction_mailbox.py | 152 --- plugins/modules/webfaction_site.py | 223 ----- .../targets/module_helper/library/mdepfail.py | 5 +- tests/sanity/ignore-2.13.txt | 3 - tests/sanity/ignore-2.14.txt | 3 - tests/sanity/ignore-2.15.txt | 3 - tests/sanity/ignore-2.16.txt | 3 - tests/sanity/ignore-2.17.txt | 3 - tests/sanity/ignore-2.18.txt | 3 - tests/unit/plugins/modules/test_cpanm.yaml | 5 +- 59 files changed, 143 insertions(+), 9719 deletions(-) create mode 100644 changelogs/fragments/remove_deprecated.yml delete mode 100644 plugins/doc_fragments/rackspace.py delete mode 100644 plugins/module_utils/rax.py delete mode 100644 plugins/modules/flowdock.py delete mode 100644 plugins/modules/rax.py delete mode 100644 plugins/modules/rax_cbs.py delete mode 100644 plugins/modules/rax_cbs_attachments.py delete mode 100644 plugins/modules/rax_cdb.py delete mode 100644 plugins/modules/rax_cdb_database.py delete mode 100644 plugins/modules/rax_cdb_user.py delete mode 100644 plugins/modules/rax_clb.py delete mode 100644 plugins/modules/rax_clb_nodes.py delete mode 100644 plugins/modules/rax_clb_ssl.py delete mode 100644 plugins/modules/rax_dns.py delete mode 100644 plugins/modules/rax_dns_record.py delete mode 100644 plugins/modules/rax_facts.py delete mode 100644 plugins/modules/rax_files.py delete mode 100644 plugins/modules/rax_files_objects.py delete mode 100644 plugins/modules/rax_identity.py delete mode 100644 plugins/modules/rax_keypair.py delete mode 100644 plugins/modules/rax_meta.py delete mode 100644 
plugins/modules/rax_mon_alarm.py delete mode 100644 plugins/modules/rax_mon_check.py delete mode 100644 plugins/modules/rax_mon_entity.py delete mode 100644 plugins/modules/rax_mon_notification.py delete mode 100644 plugins/modules/rax_mon_notification_plan.py delete mode 100644 plugins/modules/rax_network.py delete mode 100644 plugins/modules/rax_queue.py delete mode 100644 plugins/modules/rax_scaling_group.py delete mode 100644 plugins/modules/rax_scaling_policy.py delete mode 100644 plugins/modules/stackdriver.py delete mode 100644 plugins/modules/webfaction_app.py delete mode 100644 plugins/modules/webfaction_db.py delete mode 100644 plugins/modules/webfaction_domain.py delete mode 100644 plugins/modules/webfaction_mailbox.py delete mode 100644 plugins/modules/webfaction_site.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index e21d0c81c1..3d09cf4c5b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -532,8 +532,6 @@ files: maintainers: $team_flatpak $modules/flatpak_remote.py: maintainers: $team_flatpak - $modules/flowdock.py: - ignore: mcodd $modules/gandi_livedns.py: maintainers: gthiemonge $modules/gconftool2.py: @@ -1096,46 +1094,6 @@ files: $modules/python_requirements_info.py: ignore: ryansb maintainers: willthames - $modules/rax: - ignore: ryansb sivel - $modules/rax.py: - maintainers: omgjlk sivel - $modules/rax_cbs.py: - maintainers: claco - $modules/rax_cbs_attachments.py: - maintainers: claco - $modules/rax_cdb.py: - maintainers: jails - $modules/rax_cdb_database.py: - maintainers: jails - $modules/rax_cdb_user.py: - maintainers: jails - $modules/rax_clb.py: - maintainers: claco - $modules/rax_clb_nodes.py: - maintainers: neuroid - $modules/rax_clb_ssl.py: - maintainers: smashwilson - $modules/rax_files.py: - maintainers: angstwad - $modules/rax_files_objects.py: - maintainers: angstwad - $modules/rax_identity.py: - maintainers: claco - $modules/rax_mon_alarm.py: - maintainers: smashwilson - $modules/rax_mon_check.py: - 
maintainers: smashwilson - $modules/rax_mon_entity.py: - maintainers: smashwilson - $modules/rax_mon_notification.py: - maintainers: smashwilson - $modules/rax_mon_notification_plan.py: - maintainers: smashwilson - $modules/rax_network.py: - maintainers: claco omgjlk - $modules/rax_queue.py: - maintainers: claco $modules/read_csv.py: maintainers: dagwieers $modules/redfish_: @@ -1300,8 +1258,6 @@ files: maintainers: farhan7500 gautamphegde $modules/ssh_config.py: maintainers: gaqzi Akasurde - $modules/stackdriver.py: - maintainers: bwhaley $modules/stacki_host.py: labels: stacki_host maintainers: bsanders bbyhuy @@ -1394,8 +1350,6 @@ files: maintainers: $team_wdc $modules/wdc_redfish_info.py: maintainers: $team_wdc - $modules/webfaction_: - maintainers: quentinsf $modules/xattr.py: labels: xattr maintainers: bcoca diff --git a/changelogs/fragments/remove_deprecated.yml b/changelogs/fragments/remove_deprecated.yml new file mode 100644 index 0000000000..e777bf14e2 --- /dev/null +++ b/changelogs/fragments/remove_deprecated.yml @@ -0,0 +1,18 @@ +removed_features: + - "rax* modules, rax module utils, rax docs fragment - the Rackspace modules relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "stackdriver - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "webfaction_* modules - these modules relied on HTTPS APIs that do not exist anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "flowdock - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` option was removed (https://github.com/ansible-collections/community.general/pull/8198)." 
+ - "redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` have been removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198)." + - "proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198)." + - "ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198)." + - "django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, and ``validate`` were removed. Use ``clearsessions``, ``migrate``, and ``check`` instead, respectively (https://github.com/ansible-collections/community.general/pull/8198)." +deprecated_features: + - "django_manage - the ``ack_venv_creation_deprecation`` option has no more effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198)." +breaking_changes: + - "redfish_command, redfish_config, redfish_info - change the default for ``timeout`` from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198)." + - "cpanm - the default of the ``mode`` option changed from ``compatibility`` to ``new`` (https://github.com/ansible-collections/community.general/pull/8198)." + - "django_manage - the module will now fail if ``virtualenv`` is specified but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198)." + - "django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198)." 
diff --git a/galaxy.yml b/galaxy.yml index 757e6c907f..397e104ca2 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 8.6.0 +version: 9.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/meta/runtime.yml b/meta/runtime.yml index 27a4bd1ae3..402dfd5fa2 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -57,109 +57,109 @@ plugin_routing: removal_version: 10.0.0 warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. rax_cbs_attachments: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_cbs: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_cdb_database: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_cdb_user: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_cdb: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_clb_nodes: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_clb_ssl: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. 
rax_clb: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_dns_record: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_dns: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_facts: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_files_objects: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_files: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_identity: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_keypair: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_meta: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_mon_alarm: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. 
rax: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_mon_check: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_mon_entity: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_mon_notification_plan: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_mon_notification: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_network: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_queue: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_scaling_group: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. rax_scaling_policy: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on the deprecated package pyrax. + warning_text: This module relied on the deprecated package pyrax. 
rhn_channel: deprecation: removal_version: 10.0.0 @@ -171,9 +171,9 @@ plugin_routing: warning_text: RHN is EOL, please contact the community.general maintainers if still using this; see the module documentation for more details. stackdriver: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore, + warning_text: This module relied on HTTPS APIs that do not exist anymore, and any new development in the direction of providing an alternative should happen in the context of the google.cloud collection. ali_instance_facts: @@ -237,9 +237,9 @@ plugin_routing: docker_volume_info: redirect: community.docker.docker_volume_info flowdock: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. foreman: tombstone: @@ -727,29 +727,29 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use community.general.vertica_info instead. webfaction_app: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. webfaction_db: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. webfaction_domain: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. 
webfaction_mailbox: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. webfaction_site: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module relies on HTTPS APIs that do not exist anymore and + warning_text: This module relied on HTTPS APIs that do not exist anymore and there is no clear path to update. xenserver_guest_facts: tombstone: @@ -757,9 +757,9 @@ plugin_routing: warning_text: Use community.general.xenserver_guest_info instead. doc_fragments: rackspace: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This doc fragment is used by rax modules, that rely on the deprecated + warning_text: This doc fragment was used by rax modules, that relied on the deprecated package pyrax. _gcp: redirect: community.google._gcp @@ -777,9 +777,9 @@ plugin_routing: redirect: community.postgresql.postgresql module_utils: rax: - deprecation: + tombstone: removal_version: 9.0.0 - warning_text: This module util relies on the deprecated package pyrax. + warning_text: This module util relied on the deprecated package pyrax. 
docker.common: redirect: community.docker.common docker.swarm: diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py deleted file mode 100644 index f28be777ca..0000000000 --- a/plugins/doc_fragments/rackspace.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Rackspace only documentation fragment - DOCUMENTATION = r''' -options: - api_key: - description: - - Rackspace API key, overrides O(credentials). - type: str - aliases: [ password ] - credentials: - description: - - File to find the Rackspace credentials in. Ignored if O(api_key) and - O(username) are provided. - type: path - aliases: [ creds_file ] - env: - description: - - Environment as configured in C(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - type: str - region: - description: - - Region to create an instance in. - type: str - username: - description: - - Rackspace username, overrides O(credentials). - type: str - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - pyrax -notes: - - The following environment variables can be used, E(RAX_USERNAME), - E(RAX_API_KEY), E(RAX_CREDS_FILE), E(RAX_CREDENTIALS), E(RAX_REGION). - - E(RAX_CREDENTIALS) and E(RAX_CREDS_FILE) point to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating). - - E(RAX_USERNAME) and E(RAX_API_KEY) obviate the use of a credentials file. 
- - E(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...). -''' - - # Documentation fragment including attributes to enable communication - # of other OpenStack clouds. Not all rax modules support this. - OPENSTACK = r''' -options: - api_key: - type: str - description: - - Rackspace API key, overrides O(credentials). - aliases: [ password ] - auth_endpoint: - type: str - description: - - The URI of the authentication service. - - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/). - credentials: - type: path - description: - - File to find the Rackspace credentials in. Ignored if O(api_key) and - O(username) are provided. - aliases: [ creds_file ] - env: - type: str - description: - - Environment as configured in C(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - identity_type: - type: str - description: - - Authentication mechanism to use, such as rackspace or keystone. - default: rackspace - region: - type: str - description: - - Region to create an instance in. - tenant_id: - type: str - description: - - The tenant ID used for authentication. - tenant_name: - type: str - description: - - The tenant name used for authentication. - username: - type: str - description: - - Rackspace username, overrides O(credentials). - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -deprecated: - removed_in: 9.0.0 - why: This module relies on the deprecated package pyrax. - alternative: Use the Openstack modules instead. -requirements: - - pyrax -notes: - - The following environment variables can be used, E(RAX_USERNAME), - E(RAX_API_KEY), E(RAX_CREDS_FILE), E(RAX_CREDENTIALS), E(RAX_REGION). - - E(RAX_CREDENTIALS) and E(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. 
See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating). - - E(RAX_USERNAME) and E(RAX_API_KEY) obviate the use of a credentials file. - - E(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...). -''' diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py index 772df8c0e9..666081ccd1 100644 --- a/plugins/module_utils/mh/mixins/deps.py +++ b/plugins/module_utils/mh/mixins/deps.py @@ -7,11 +7,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import traceback - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - class DependencyCtxMgr(object): def __init__(self, name, msg=None): @@ -35,39 +30,3 @@ class DependencyCtxMgr(object): @property def text(self): return self.msg or str(self.exc_val) - - -class DependencyMixin(ModuleHelperBase): - """ - THIS CLASS IS BEING DEPRECATED. - See the deprecation notice in ``DependencyMixin.fail_on_missing_deps()`` below. - - Mixin for mapping module options to running a CLI command with its arguments. - """ - _dependencies = [] - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - if not self._dependencies: - return - self.module.deprecate( - 'The DependencyMixin is being deprecated. 
' - 'Modules should use community.general.plugins.module_utils.deps instead.', - version='9.0.0', - collection_name='community.general', - ) - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index c33efb16b9..3390303ce8 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -13,12 +13,11 @@ from ansible.module_utils.common.dict_transformations import dict_merge # (TODO: remove AnsibleModule!) pylint: disable-next=unused-import from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin -class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase): +class ModuleHelper(DeprecateAttrsMixin, VarsMixin, ModuleHelperBase): facts_name = None output_params = () diff_params = () diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 5aa16c057a..4754ec9ad0 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -14,7 +14,7 @@ from ansible_collections.community.general.plugins.module_utils.mh.module_helper ModuleHelper, StateModuleHelper, AnsibleModule ) from 
ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr, DependencyMixin # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.deco import ( cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, diff --git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py deleted file mode 100644 index 6331c0d1be..0000000000 --- a/plugins/module_utils/rax.py +++ /dev/null @@ -1,334 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their own -# license to the complete work. 
-# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -from uuid import UUID - -from ansible.module_utils.six import text_type, binary_type - -FINAL_STATUSES = ('ACTIVE', 'ERROR') -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') - -CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', - 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] -CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', - 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', - 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] - -NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) -PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" -SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" - - -def rax_slugify(value): - """Prepend a key with rax_ and normalize the key name""" - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def rax_clb_node_to_dict(obj): - """Function to convert a CLB Node object to a dict""" - if not obj: - return {} - node = obj.to_dict() - node['id'] = obj.id - node['weight'] = obj.weight - return node - - -def rax_to_dict(obj, obj_type='standard'): - """Generic function to convert a pyrax object to a dict - - obj_type values: - standard - clb - server - - """ - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if obj_type == 'clb' and key == 'nodes': - instance[key] = [] - for node in value: - instance[key].append(rax_clb_node_to_dict(node)) - elif (isinstance(value, list) and len(value) > 0 and - not isinstance(value[0], NON_CALLABLES)): - instance[key] = [] - for item in value: - instance[key].append(rax_to_dict(item)) - elif (isinstance(value, 
NON_CALLABLES) and not key.startswith('_')): - if obj_type == 'server': - if key == 'image': - if not value: - instance['rax_boot_source'] = 'volume' - else: - instance['rax_boot_source'] = 'local' - key = rax_slugify(key) - instance[key] = value - - if obj_type == 'server': - for attr in ['id', 'accessIPv4', 'name', 'status']: - instance[attr] = instance.get(rax_slugify(attr)) - - return instance - - -def rax_find_bootable_volume(module, rax_module, server, exit=True): - """Find a servers bootable volume""" - cs = rax_module.cloudservers - cbs = rax_module.cloud_blockstorage - server_id = rax_module.utils.get_id(server) - volumes = cs.volumes.get_server_volumes(server_id) - bootable_volumes = [] - for volume in volumes: - vol = cbs.get(volume) - if module.boolean(vol.bootable): - bootable_volumes.append(vol) - if not bootable_volumes: - if exit: - module.fail_json(msg='No bootable volumes could be found for ' - 'server %s' % server_id) - else: - return False - elif len(bootable_volumes) > 1: - if exit: - module.fail_json(msg='Multiple bootable volumes found for server ' - '%s' % server_id) - else: - return False - - return bootable_volumes[0] - - -def rax_find_image(module, rax_module, image, exit=True): - """Find a server image by ID or Name""" - cs = rax_module.cloudservers - try: - UUID(image) - except ValueError: - try: - image = cs.images.find(human_id=image) - except (cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch): - try: - image = cs.images.find(name=image) - except (cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - if exit: - module.fail_json(msg='No matching image found (%s)' % - image) - else: - return False - - return rax_module.utils.get_id(image) - - -def rax_find_volume(module, rax_module, name): - """Find a Block storage volume by ID or name""" - cbs = rax_module.cloud_blockstorage - try: - UUID(name) - volume = cbs.get(name) - except ValueError: - try: - volume = cbs.find(name=name) - except rax_module.exc.NotFound: - volume = None - 
except Exception as e: - module.fail_json(msg='%s' % e) - return volume - - -def rax_find_network(module, rax_module, network): - """Find a cloud network by ID or name""" - cnw = rax_module.cloud_networks - try: - UUID(network) - except ValueError: - if network.lower() == 'public': - return cnw.get_server_networks(PUBLIC_NET_ID) - elif network.lower() == 'private': - return cnw.get_server_networks(SERVICE_NET_ID) - else: - try: - network_obj = cnw.find_network_by_label(network) - except (rax_module.exceptions.NetworkNotFound, - rax_module.exceptions.NetworkLabelNotUnique): - module.fail_json(msg='No matching network found (%s)' % - network) - else: - return cnw.get_server_networks(network_obj) - else: - return cnw.get_server_networks(network) - - -def rax_find_server(module, rax_module, server): - """Find a Cloud Server by ID or name""" - cs = rax_module.cloudservers - try: - UUID(server) - server = cs.servers.get(server) - except ValueError: - servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) - if not servers: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - if len(servers) > 1: - module.fail_json(msg='Multiple servers matched by name, ' - 'try using the Server ID instead') - - # We made it this far, grab the first and hopefully only server - # in the list - server = servers[0] - return server - - -def rax_find_loadbalancer(module, rax_module, loadbalancer): - """Find a Cloud Load Balancer by ID or name""" - clb = rax_module.cloud_loadbalancers - try: - found = clb.get(loadbalancer) - except Exception: - found = [] - for lb in clb.list(): - if loadbalancer == lb.name: - found.append(lb) - - if not found: - module.fail_json(msg='No loadbalancer was matched') - - if len(found) > 1: - module.fail_json(msg='Multiple loadbalancers matched') - - # We made it this far, grab the first and hopefully only item - # in the list - found = found[0] - - return found - - -def rax_argument_spec(): - """Return standard 
base dictionary used for the argument_spec - argument in AnsibleModule - - """ - return dict( - api_key=dict(type='str', aliases=['password'], no_log=True), - auth_endpoint=dict(type='str'), - credentials=dict(type='path', aliases=['creds_file']), - env=dict(type='str'), - identity_type=dict(type='str', default='rackspace'), - region=dict(type='str'), - tenant_id=dict(type='str'), - tenant_name=dict(type='str'), - username=dict(type='str'), - validate_certs=dict(type='bool', aliases=['verify_ssl']), - ) - - -def rax_required_together(): - """Return the default list used for the required_together argument to - AnsibleModule""" - return [['api_key', 'username']] - - -def setup_rax_module(module, rax_module, region_required=True): - """Set up pyrax in a standard way for all modules""" - rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, - rax_module.USER_AGENT) - - api_key = module.params.get('api_key') - auth_endpoint = module.params.get('auth_endpoint') - credentials = module.params.get('credentials') - env = module.params.get('env') - identity_type = module.params.get('identity_type') - region = module.params.get('region') - tenant_id = module.params.get('tenant_id') - tenant_name = module.params.get('tenant_name') - username = module.params.get('username') - verify_ssl = module.params.get('validate_certs') - - if env is not None: - rax_module.set_environment(env) - - rax_module.set_setting('identity_type', identity_type) - if verify_ssl is not None: - rax_module.set_setting('verify_ssl', verify_ssl) - if auth_endpoint is not None: - rax_module.set_setting('auth_endpoint', auth_endpoint) - if tenant_id is not None: - rax_module.set_setting('tenant_id', tenant_id) - if tenant_name is not None: - rax_module.set_setting('tenant_name', tenant_name) - - try: - username = username or os.environ.get('RAX_USERNAME') - if not username: - username = rax_module.get_setting('keyring_username') - if username: - api_key = 'USE_KEYRING' - if not api_key: - api_key 
= os.environ.get('RAX_API_KEY') - credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or - os.environ.get('RAX_CREDS_FILE')) - region = (region or os.environ.get('RAX_REGION') or - rax_module.get_setting('region')) - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - - try: - if api_key and username: - if api_key == 'USE_KEYRING': - rax_module.keyring_auth(username, region=region) - else: - rax_module.set_credentials(username, api_key=api_key, - region=region) - elif credentials: - credentials = os.path.expanduser(credentials) - rax_module.set_credential_file(credentials, region=region) - else: - raise Exception('No credentials supplied!') - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - - if region_required and region not in rax_module.regions: - module.fail_json(msg='%s is not a valid region, must be one of: %s' % - (region, ','.join(rax_module.regions))) - - return rax_module - - -def rax_scaling_group_personality_file(module, files): - if not files: - return [] - - results = [] - for rpath, lpath in files.items(): - lpath = os.path.expanduser(lpath) - try: - with open(lpath, 'r') as f: - results.append({ - 'path': rpath, - 'contents': f.read(), - }) - except Exception as e: - module.fail_json(msg='Failed to load %s: %s' % (lpath, str(e))) - return results diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py index 110159ddfc..321386a0a5 100644 --- a/plugins/module_utils/redhat.py +++ b/plugins/module_utils/redhat.py @@ -15,10 +15,8 @@ __metaclass__ = type import os -import re import shutil import tempfile -import types from ansible.module_utils.six.moves import configparser @@ -76,241 +74,3 @@ class RegistrationBase(object): def subscribe(self, **kwargs): raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in 
community.general 9.0.0. - There is no replacement for it; please contact the community.general - maintainers in case you are using it. - """ - - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.config = self._read_config() - self.module = module - self.module.deprecate( - 'The Rhsm class is deprecated with no replacement.', - version='9.0.0', - collection_name='community.general', - ) - - def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): - ''' - Load RHSM configuration from /etc/rhsm/rhsm.conf. - Returns: - * ConfigParser object - ''' - - # Read RHSM defaults ... - cp = configparser.ConfigParser() - cp.read(rhsm_conf) - - # Add support for specifying a default value w/o having to standup some configuration - # Yeah, I know this should be subclassed ... but, oh well - def get_option_default(self, key, default=''): - sect, opt = key.split('.', 1) - if self.has_section(sect) and self.has_option(sect, opt): - return self.get(sect, opt) - else: - return default - - cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser) - - return cp - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHN - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--system.hostname'. 
- for k, v in kwargs.items(): - if re.search(r'^(system|rhsm)_', k): - args.append('--%s=%s' % (k.replace('_', '.'), v)) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHN. - ''' - args = ['subscription-manager', 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, autosubscribe, activationkey): - ''' - Register the current system to the provided RHN server - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'register'] - - # Generate command arguments - if activationkey: - args.append('--activationkey "%s"' % activationkey) - else: - if autosubscribe: - args.append('--autosubscribe') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - - # Do the needful... 
- rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unsubscribe(self): - ''' - Unsubscribe a system from all subscribed channels - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - for pool in available_pools.filter(regexp): - pool.subscribe() - - -class RhsmPool(object): - """ - Convenience class for housing subscription information - - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 9.0.0. - There is no replacement for it; please contact the community.general - maintainers in case you are using it. 
- """ - - def __init__(self, module, **kwargs): - self.module = module - for k, v in kwargs.items(): - setattr(self, k, v) - self.module.deprecate( - 'The RhsmPool class is deprecated with no replacement.', - version='9.0.0', - collection_name='community.general', - ) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 9.0.0. - There is no replacement for it; please contact the community.general - maintainers in case you are using it. - """ - - def __init__(self, module): - self.module = module - self.products = self._load_product_list() - self.module.deprecate( - 'The RhsmPools class is deprecated with no replacement.', - version='9.0.0', - collection_name='community.general', - ) - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self): - """ - Loads list of all available pools for system in data structure - """ - args = "subscription-manager list --available" - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of an output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - 
products[-1].__setattr__(key, value) - # FIXME - log some warning? - # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter(self, regexp='^$'): - ''' - Return a list of RhsmPools whose name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index 3b0a8fd47b..1e2496daed 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -73,16 +73,6 @@ options: - Using O(force=true) is mandatory when downgrading. type: bool default: false - ack_ansible29: - description: - - This option has no longer any effect and will be removed in community.general 9.0.0. - type: bool - default: false - ack_min_ansiblecore211: - description: - - This option has no longer any effect and will be removed in community.general 9.0.0. - type: bool - default: false """ EXAMPLES = """ @@ -202,18 +192,6 @@ class AnsibleGalaxyInstall(ModuleHelper): dest=dict(type='path'), force=dict(type='bool', default=False), no_deps=dict(type='bool', default=False), - ack_ansible29=dict( - type='bool', - default=False, - removed_in_version='9.0.0', - removed_from_collection='community.general', - ), - ack_min_ansiblecore211=dict( - type='bool', - default=False, - removed_in_version='9.0.0', - removed_from_collection='community.general', - ), ), mutually_exclusive=[('name', 'requirements_file')], required_one_of=[('name', 'requirements_file')], diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 20ac3e7149..302f995932 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -68,9 +68,10 @@ options: mode: description: - Controls the module behavior. See notes below for more details. 
- - Default is V(compatibility) but that behavior is deprecated and will be changed to V(new) in community.general 9.0.0. + - The default changed from V(compatibility) to V(new) in community.general 9.0.0. type: str choices: [compatibility, new] + default: new version_added: 3.0.0 name_check: description: @@ -80,12 +81,16 @@ options: notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. - "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)." - - "O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. This is the default mode. + - > + O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. + This was the default mode before community.general 9.0.0. O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. - C(cpanm) version specifiers do not work in this mode." - - "O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized." + C(cpanm) version specifiers do not work in this mode. + - > + O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, + a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. + This is the default mode from community.general 9.0.0 onwards. 
author: - "Franck Cuny (@fcuny)" - "Alexei Znamensky (@russoz)" @@ -150,7 +155,7 @@ class CPANMinus(ModuleHelper): mirror_only=dict(type='bool', default=False), installdeps=dict(type='bool', default=False), executable=dict(type='path'), - mode=dict(type='str', choices=['compatibility', 'new']), + mode=dict(type='str', default='new', choices=['compatibility', 'new']), name_check=dict(type='str') ), required_one_of=[('name', 'from_path')], @@ -168,14 +173,6 @@ class CPANMinus(ModuleHelper): def __init_module__(self): v = self.vars - if v.mode is None: - self.deprecate( - "The default value 'compatibility' for parameter 'mode' is being deprecated " - "and it will be replaced by 'new'", - version="9.0.0", - collection_name="community.general" - ) - v.mode = "compatibility" if v.mode == "compatibility": if v.name_check: self.do_raise("Parameter name_check can only be used with mode=new") diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py index 114ec0353e..352bfe4b50 100644 --- a/plugins/modules/django_manage.py +++ b/plugins/modules/django_manage.py @@ -28,23 +28,16 @@ options: command: description: - The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation. - - > - V(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be - removed in community.general 9.0.0. Use V(clearsessions) instead. - V(collectstatic) - Collects the static files into C(STATIC_ROOT). - V(createcachetable) - Creates the cache tables for use with the database cache backend. - V(flush) - Removes all data from the database. - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database. - V(migrate) - Synchronizes the database state with models and migrations. - - > - V(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7). 
- This parameter will be removed in community.general 9.0.0. Use V(migrate) instead. - V(test) - Runs tests for all installed apps. - - > - V(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be - removed in community.general 9.0.0. Use V(check) instead. - - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may + - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may prompt for user input should be run with the C(--noinput) flag. + - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. + See note about supported versions of Django. type: str required: true project_path: @@ -69,6 +62,7 @@ options: virtualenv: description: - An optional path to a C(virtualenv) installation to use while running the manage application. + - The virtual environment must exist, otherwise the module will fail. type: path aliases: [virtual_env] apps: @@ -132,31 +126,24 @@ options: aliases: [test_runner] ack_venv_creation_deprecation: description: - - >- - When a O(virtualenv) is set but the virtual environment does not exist, the current behavior is - to create a new virtual environment. That behavior is deprecated and if that case happens it will - generate a deprecation warning. Set this flag to V(true) to suppress the deprecation warning. - - Please note that you will receive no further warning about this being removed until the module - will start failing in such cases from community.general 9.0.0 on. + - This option no longer has any effect since community.general 9.0.0. + - It will be removed from community.general 11.0.0. type: bool version_added: 5.8.0 notes: - > - B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in - community.general version 9.0.0 (estimated to be released in May 2024). - Please notice that Django 4.1 requires Python 3.8 or greater. 
- - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the O(virtualenv) parameter - is specified. This requirement is deprecated and will be removed in community.general version 9.0.0. - - This module will create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not already - exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0. - - The parameter O(virtualenv) will remain in use, but it will require the specified virtualenv to exist. - The recommended way to create one in Ansible is by using M(ansible.builtin.pip). + B(ATTENTION): Support for Django releases older than 4.1 has been removed in + community.general version 9.0.0. While the module allows for free-form commands + does not verify the version of Django being used, it is B(strongly recommended) + to use a more recent version of Django. + - Please notice that Django 4.1 requires Python 3.8 or greater. + - This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment + does not already exist at the given location. This behavior changed in community.general version 9.0.0. + - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip). - This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately. - - To be able to use the V(migrate) command with django versions < 1.7, you must have C(south) installed and added - as an app in your settings. - - To be able to use the V(collectstatic) command, you must have enabled staticfiles in your settings. + - To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings. - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang, for example C(#!/usr/bin/env python), for invoking the appropriate Python interpreter. 
seealso: @@ -169,7 +156,7 @@ seealso: - name: What Python version can I use with Django? description: From the Django FAQ, the response to Python requirements for the framework. link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django -requirements: [ "virtualenv", "django" ] +requirements: [ "django >= 4.1" ] author: - Alexei Znamensky (@russoz) - Scott Anderson (@tastychutney) @@ -178,7 +165,7 @@ author: EXAMPLES = """ - name: Run cleanup on the application installed in django_dir community.general.django_manage: - command: cleanup + command: clearsessions project_path: "{{ django_dir }}" - name: Load the initial_data fixture into the application @@ -189,7 +176,7 @@ EXAMPLES = """ - name: Run syncdb on the application community.general.django_manage: - command: syncdb + command: migrate project_path: "{{ django_dir }}" settings: "{{ settings_app_name }}" pythonpath: "{{ settings_dir }}" @@ -233,22 +220,7 @@ def _ensure_virtualenv(module): activate = os.path.join(vbin, 'activate') if not os.path.exists(activate): - # In version 9.0.0, if the venv is not found, it should fail_json() here. - if not module.params['ack_venv_creation_deprecation']: - module.deprecate( - 'The behavior of "creating the virtual environment when missing" is being ' - 'deprecated and will be removed in community.general version 9.0.0. 
' - 'Set the module parameter `ack_venv_creation_deprecation: true` to ' - 'prevent this message from showing up when creating a virtualenv.', - version='9.0.0', - collection_name='community.general', - ) - - virtualenv = module.get_bin_path('virtualenv', True) - vcmd = [virtualenv, venv_param] - rc, out_venv, err_venv = module.run_command(vcmd) - if rc != 0: - _fail(module, vcmd, out_venv, err_venv) + module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param) os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) os.environ["VIRTUAL_ENV"] = venv_param @@ -266,11 +238,6 @@ def loaddata_filter_output(line): return "Installed" in line and "Installed 0 object" not in line -def syncdb_filter_output(line): - return ("Creating table " in line) \ - or ("Installed" in line and "Installed 0 object" not in line) - - def migrate_filter_output(line): return ("Migrating forwards " in line) \ or ("Installed" in line and "Installed 0 object" not in line) \ @@ -283,13 +250,10 @@ def collectstatic_filter_output(line): def main(): command_allowed_param_map = dict( - cleanup=(), createcachetable=('cache_table', 'database', ), flush=('database', ), loaddata=('database', 'fixtures', ), - syncdb=('database', ), test=('failfast', 'testrunner', 'apps', ), - validate=(), migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('clear', 'link', ), ) @@ -301,7 +265,6 @@ def main(): # forces --noinput on every command that needs it noinput_commands = ( 'flush', - 'syncdb', 'migrate', 'test', 'collectstatic', @@ -333,7 +296,7 @@ def main(): skip=dict(type='bool'), merge=dict(type='bool'), link=dict(type='bool'), - ack_venv_creation_deprecation=dict(type='bool'), + ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'), ), ) @@ -342,21 +305,6 @@ def main(): project_path = module.params['project_path'] virtualenv = module.params['virtualenv'] - try: - _deprecation = dict( - 
cleanup="clearsessions", - syncdb="migrate", - validate="check", - ) - module.deprecate( - 'The command {0} has been deprecated as it is no longer supported in recent Django versions.' - 'Please use the command {1} instead that provide similar capability.'.format(command_bin, _deprecation[command_bin]), - version='9.0.0', - collection_name='community.general' - ) - except KeyError: - pass - for param in specific_params: value = module.params[param] if value and param not in command_allowed_param_map[command_bin]: diff --git a/plugins/modules/flowdock.py b/plugins/modules/flowdock.py deleted file mode 100644 index 0e8a7461da..0000000000 --- a/plugins/modules/flowdock.py +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: flowdock -author: "Matt Coddington (@mcodd)" -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - token: - type: str - description: - - API token. 
- required: true - type: - type: str - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - type: str - description: - - Content of the message - required: true - tags: - type: str - description: - - tags of the message, separated by commas - required: false - external_user_name: - type: str - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - type: str - description: - - (inbox only - required) Email address of the message sender - required: false - source: - type: str - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - type: str - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - type: str - description: - - (inbox only) Name of the message sender - required: false - reply_to: - type: str - description: - - (inbox only) Email address for replies - required: false - project: - type: str - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - type: str - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: true - type: bool - -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Send a message to a flowdock - community.general.flowdock: - type: inbox - token: AAAAAA - from_address: user@example.com - source: my cool app - msg: test from ansible - subject: test subject - -- name: Send a message to a flowdock - community.general.flowdock: - type: chat - token: AAAAAA - external_user_name: testuser - msg: test from ansible - tags: tag1,tag2,tag3 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox", "chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: - params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="external_user_name is required for the 'chat' type") - - # required params for 
the 'inbox' type - for item in ['from_address', 'source', 'subject']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in ['from_name', 'reply_to', 'project', 'link']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 47f3faa4f2..742c87c3c1 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -15,7 +15,7 @@ short_description: Management of instances in Proxmox VE cluster description: - Allows you to create/delete/stop instances in Proxmox VE cluster. - The module automatically detects containerization type (lxc for PVE 4, openvz for older). - - Since community.general 4.0.0 on, there are no more default values, see O(proxmox_default_behavior). + - Since community.general 4.0.0 on, there are no more default values. attributes: check_mode: support: none @@ -47,28 +47,23 @@ options: comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. 
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(3). - Should not be used in conjunction with O(storage). type: str cores: description: - Specify number of cores per socket. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1). type: int cpus: description: - numbers of allocated cpus for instance - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1). type: int memory: description: - memory size in MB for instance - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512). type: int swap: description: - swap memory size in MB for instance - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0). type: int netif: description: @@ -101,7 +96,6 @@ options: onboot: description: - specifies whether a VM will be started during system bootup - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). type: bool storage: description: @@ -120,7 +114,6 @@ options: cpuunits: description: - CPU weight for a VM - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000). type: int nameserver: description: @@ -200,25 +193,6 @@ options: - The special value V(host) configures the same timezone used by Proxmox host. type: str version_added: '7.1.0' - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. 
- - The value V(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is V(no_defaults), - which makes sure these options have no defaults. - - This affects the O(disk), O(cores), O(cpus), O(memory), O(onboot), O(swap), and O(cpuunits) options. - - > - This parameter is now B(deprecated) and it will be removed in community.general 10.0.0. - By then, the module's behavior should be to not set default values, equivalent to V(no_defaults). - If a consistent set of defaults is needed, the playbook or role should be responsible for setting it. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" clone: description: - ID of the container to be cloned. @@ -785,8 +759,6 @@ def main(): description=dict(type='str'), hookscript=dict(type='str'), timezone=dict(type='str'), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults'], - removed_in_version='9.0.0', removed_from_collection='community.general'), clone=dict(type='int'), clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), tags=dict(type='list', elements='str') @@ -827,20 +799,6 @@ def main(): timeout = module.params['timeout'] clone = module.params['clone'] - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - disk="3", - cores=1, - cpus=1, - memory=512, - swap=0, - onboot=False, - cpuunits=1000, - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - # If vmid not set get the Next VM id from ProxmoxAPI # If hostname is set get the VM id from ProxmoxAPI if not vmid and state == 'present': diff --git a/plugins/modules/rax.py b/plugins/modules/rax.py deleted file mode 100644 index 76e4299447..0000000000 --- a/plugins/modules/rax.py +++ /dev/null @@ -1,903 
+0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax -short_description: Create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the O(group) attribute - or meta key. - type: bool - default: true - boot_from_volume: - description: - - Whether or not to boot the instance from a Cloud Block Storage volume. - If V(true) and O(image) is specified a new volume will be created at - boot time. O(boot_volume_size) is required with O(image) to create a - new volume at boot time. - type: bool - default: false - boot_volume: - type: str - description: - - Cloud Block Storage ID or Name to use as the boot volume of the - instance - boot_volume_size: - type: int - description: - - Size of the volume to create in Gigabytes. This is only required with - O(image) and O(boot_from_volume). 
- default: 100 - boot_volume_terminate: - description: - - Whether the O(boot_volume) or newly created volume from O(image) will - be terminated when the server is terminated - type: bool - default: false - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: false - count: - type: int - description: - - number of instances to launch - default: 1 - count_offset: - type: int - description: - - number count to start at - default: 1 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified it will assume the value V(auto). - choices: - - auto - - manual - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present. If specified as V(true) and O(count) is less than - the servers matched, servers will be deleted to match the count. If - the number of matched servers is fewer than specified in O(count) - additional servers will be added. - type: bool - default: false - extra_client_args: - type: dict - default: {} - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - extra_create_args: - type: dict - default: {} - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - files: - type: dict - default: {} - description: - - Files to insert into the instance. remotefilename:localcontent - flavor: - type: str - description: - - flavor to use for the instance - group: - type: str - description: - - host group to assign to server, is also used for idempotent operations - to ensure a specific number of instances - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name). 
- With O(boot_from_volume), a Cloud Block Storage volume will be created - with this image - instance_ids: - type: list - elements: str - description: - - list of instance ids, currently only used when state='absent' to - remove instances - key_name: - type: str - description: - - key pair to use on the instance - aliases: - - keypair - meta: - type: dict - default: {} - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the instance - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - O(config_drive). Can be a file path or a string - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Jesse Keating (@omgjlk)" - - "Matt Martz (@sivel)" -notes: - - O(exact_count) can be "destructive" if the number of running servers in - the O(group) is larger than that specified in O(count). In such a case, the - O(state) is effectively set to V(absent) and the extra servers are deleted. - In the case of deletion, the returned data structure will have RV(ignore:action) - set to V(delete), and the oldest servers in the group will be deleted. 
-extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: false - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - key_name: my_rackspace_key - files: - /root/test.txt: /home/localuser/test.txt - wait: true - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: false - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: true - group: test - wait: true - register: rax -''' - -import json -import os -import re -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, - rax_find_image, rax_find_network, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.six import string_types - - -def rax_find_server_image(module, server, image, boot_volume): - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: - return None - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and 
not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - return None - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - return None - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: - return None - - return server.image - - -def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, - files=None, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=None, extra_create_args=None, user_data=None, - config_drive=False, existing=None, block_device_mapping_v2=None): - names = [] if names is None else names - meta = {} if meta is None else meta - files = {} if files is None else files - nics = [] if nics is None else nics - extra_create_args = {} if extra_create_args is None else extra_create_args - existing = [] if existing is None else existing - block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 - - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(os.path.expanduser(user_data)): - try: - user_data = os.path.expanduser(user_data) - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - bdmv2 = block_device_mapping_v2 - for name in names: - servers.append(cs.servers.create(name=name, image=image, - flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - 
userdata=user_data, - block_device_mapping_v2=bdmv2, - **extra_create_args)) - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None): - instance_ids = [] if instance_ids is None else instance_ids - kept = [] if kept is None else kept - - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception as e: - 
module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except Exception: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta=None, key_name=None, files=None, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=None, - exact_count=False, networks=None, count_offset=0, - auto_increment=False, extra_create_args=None, user_data=None, - config_drive=False, boot_from_volume=False, - boot_volume=None, boot_volume_size=None, - boot_volume_terminate=False): - meta = {} if meta is None else meta - 
files = {} if files is None else files - instance_ids = [] if instance_ids is None else instance_ids - networks = [] if networks is None else networks - extra_create_args = {} if extra_create_args is None else extra_create_args - - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present' or (state == 'absent' and instance_ids is None): - if not boot_from_volume and not boot_volume and not image: - module.fail_json(msg='image is required for the "rax" module') - - for arg, value in dict(name=name, flavor=flavor).items(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - if boot_from_volume and not image and not boot_volume: - module.fail_json(msg='image or boot_volume are required for the ' - '"rax" with boot_from_volume') - - if boot_from_volume and image and not boot_volume_size: - module.fail_json(msg='boot_volume_size is required for the "rax" ' - 'module with boot_from_volume and image') - - if boot_from_volume and image and boot_volume: - image = None - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count - # to the desired level - was_absent = False - if group is not None and state == 'absent': - exact_count = True - state = 'present' - was_absent = True - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - # act on the state - if state == 'present': - # Idempotent ensurance of a specific count of servers - if exact_count is not False: - # See if we can find servers that match our options - if group is None: - module.fail_json(msg='"group" must be provided when using ' - '"exact_count"') - - if auto_increment: - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: # Not auto incrementing - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - # available_numbers not needed here, we inspect auto_increment - # again later - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 - else: - count = diff - - if len(servers) > count: - # We have more servers 
than we need, set state='absent' - # and delete the extras, this should delete the oldest - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - # we have fewer servers than we need - if auto_increment: - # auto incrementing server numbers - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - # We are not auto incrementing server numbers, - # create a list of 'name' that matches how many we need - names = [name] * (count - len(servers)) - else: - # we have the right number of servers, just return info - # about all of the matched servers - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: # not called with exact_count=True - if group is not None: - if auto_increment: - # we are auto incrementing server numbers, but not with - # exact_count - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - 
numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range) - .difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - # Not auto incrementing - names = [name] * count - else: - # No group was specified, and not using exact_count - # Perform more simplistic matching - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - servers = [] - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if server.metadata != meta: - continue - servers.append(server) - - if len(servers) >= count: - # We have more servers than were requested, don't do - # anything. Not running with exact_count=True, so we assume - # more is OK - instances = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - - instance_ids = [i['id'] for i in instances] - module.exit_json(changed=False, action=None, - instances=instances, success=[], error=[], - timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - - # We need more servers to reach out target, create names for - # them, we aren't performing auto_increment here - names = [name] * (count - len(servers)) - - block_device_mapping_v2 = [] - if boot_from_volume: - mapping = { - 'boot_index': '0', - 'delete_on_termination': boot_volume_terminate, - 'destination_type': 'volume', - } - if image: - mapping.update({ - 'uuid': image, - 'source_type': 'image', - 'volume_size': boot_volume_size, - }) - image = None - elif boot_volume: - volume = rax_find_volume(module, pyrax, boot_volume) - mapping.update({ - 'uuid': pyrax.utils.get_id(volume), - 'source_type': 'volume', - }) - 
block_device_mapping_v2.append(mapping) - - create(module, names=names, flavor=flavor, image=image, - meta=meta, key_name=key_name, files=files, wait=wait, - wait_timeout=wait_timeout, disk_config=disk_config, group=group, - nics=nics, extra_create_args=extra_create_args, - user_data=user_data, config_drive=config_drive, - existing=servers, - block_device_mapping_v2=block_device_mapping_v2) - - elif state == 'absent': - if instance_ids is None: - # We weren't given an explicit list of server IDs to delete - # Let's match instead - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if meta != server.metadata: - continue - - servers.append(server) - - # Build a list of server IDs to delete - instance_ids = [] - for server in servers: - if len(instance_ids) < count: - instance_ids.append(server.id) - else: - break - - if not instance_ids: - # No server IDs were matched for deletion, or no IDs were - # explicitly provided, just exit and don't do anything - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - boot_from_volume=dict(default=False, type='bool'), - boot_volume=dict(type='str'), - boot_volume_size=dict(type='int', default=100), - boot_volume_terminate=dict(type='bool', default=False), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 
'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list', elements='str'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', elements='str', default=['public', 'private']), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - auto_increment = module.params.get('auto_increment') - boot_from_volume = module.params.get('boot_from_volume') - boot_volume = module.params.get('boot_volume') - boot_volume_size = module.params.get('boot_volume_size') - boot_volume_terminate = module.params.get('boot_volume_terminate') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') 
- wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive, boot_from_volume=boot_from_volume, - boot_volume=boot_volume, boot_volume_size=boot_volume_size, - boot_volume_terminate=boot_volume_terminate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_cbs.py b/plugins/modules/rax_cbs.py deleted file mode 100644 index 77e7cebad4..0000000000 --- a/plugins/modules/rax_cbs.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: 
- description: - type: str - description: - - Description to give the volume being created. - image: - type: str - description: - - Image to use for bootable volumes. Can be an C(id), C(human_id) or - C(name). This option requires C(pyrax>=1.9.3). - meta: - type: dict - default: {} - description: - - A hash of metadata to associate with the volume. - name: - type: str - description: - - Name to give the volume being created. - required: true - size: - type: int - description: - - Size of the volume to create in Gigabytes. - default: 100 - snapshot_id: - type: str - description: - - The id of the snapshot to create the volume from. - state: - type: str - description: - - Indicate desired state of the resource. - choices: - - present - - absent - default: present - volume_type: - type: str - description: - - Type of the volume being created. - choices: - - SATA - - SSD - default: SATA - wait: - description: - - Wait for the volume to be in state C(available) before returning. - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds. - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: false - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: true - state: present - meta: - app: my-cool-app - register: my_volume -''' - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image): - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if image: - # pyrax<1.9.3 did not have support for specifying an image when - # creating a volume which is required for bootable volumes - if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): - module.fail_json(msg='Creating a bootable volume requires ' - 'pyrax>=1.9.3') - image = rax_find_image(module, pyrax, image) - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - kwargs = dict() - if image: - kwargs['image'] = image - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id, **kwargs) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - instance = rax_to_dict(volume) - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - instance = rax_to_dict(volume) - try: - volume.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(type='str'), - image=dict(type='str'), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - 
wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - image = module.params.get('image') - meta = module.params.get('meta') - name = module.params.get('name') - size = module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_cbs_attachments.py b/plugins/modules/rax_cbs_attachments.py deleted file mode 100644 index 00b860a90f..0000000000 --- a/plugins/modules/rax_cbs_attachments.py +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - device: - type: str - description: - - The device path to attach the volume to, e.g. /dev/xvde. - - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. 
- volume: - type: str - description: - - Name or id of the volume to attach/detach - required: true - server: - type: str - description: - - Name or id of the server to attach/detach - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: false - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: true - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, - rax_argument_spec, - rax_find_server, - rax_find_volume, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).items(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - volume.get() - result['volume'] = rax_to_dict(volume) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - result = dict(changed=changed, volume=rax_to_dict(volume)) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - 
module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=False), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_cdb.py b/plugins/modules/rax_cdb.py deleted file mode 100644 index 9538579fad..0000000000 --- a/plugins/modules/rax_cdb.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: Create/delete or resize a Rackspace Cloud Databases instance -description: - - creates / deletes or resize a Rackspace Cloud Databases instance - and optionally waits for it to be 'running'. The name option needs to be - unique since it's used to identify the instance. 
-attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - type: str - description: - - Name of the databases server instance - required: true - flavor: - type: int - description: - - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) - default: 1 - volume: - type: int - description: - - Volume size of the database 1-150GB - default: 2 - cdb_type: - type: str - description: - - type of instance (i.e. MySQL, MariaDB, Percona) - default: MySQL - aliases: ['type'] - cdb_version: - type: str - description: - - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6) - - "The available choices are: V(5.1), V(5.6) and V(10)." - default: '5.6' - aliases: ['version'] - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a Cloud Databases - gather_facts: false - tasks: - - name: Server build request - local_action: - module: rax_cdb - credentials: ~/.raxpub - region: IAD - name: db-server1 - flavor: 1 - volume: 2 - cdb_type: MySQL - cdb_version: 5.6 - wait: true - state: present - register: rax_db_server -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_instance(name): - - cdb = pyrax.cloud_databases - instances = cdb.list() - if instances: - 
for instance in instances: - if instance.name == name: - return instance - return False - - -def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - for arg, value in dict(name=name, flavor=flavor, - volume=volume, type=cdb_type, version=cdb_version - ).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb"' - ' module' % arg) - - if not (volume >= 1 and volume <= 150): - module.fail_json(msg='volume is required to be between 1 and 150') - - cdb = pyrax.cloud_databases - - flavors = [] - for item in cdb.list_flavors(): - flavors.append(item.id) - - if not (flavor in flavors): - module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) - - changed = False - - instance = find_instance(name) - - if not instance: - action = 'create' - try: - instance = cdb.create(name=name, flavor=flavor, volume=volume, - type=cdb_type, version=cdb_version) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - else: - action = None - - if instance.volume.size != volume: - action = 'resize' - if instance.volume.size > volume: - module.fail_json(changed=False, action=action, - msg='The new volume size must be larger than ' - 'the current volume size', - cdb=rax_to_dict(instance)) - instance.resize_volume(volume) - changed = True - - if int(instance.flavor.id) != flavor: - action = 'resize' - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - 
module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - cdb_type=dict(type='str', default='MySQL', aliases=['type']), - cdb_version=dict(type='str', default='5.6', aliases=['version']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - cdb_type = module.params.get('cdb_type') - cdb_version = module.params.get('cdb_version') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = 
module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_cdb_database.py b/plugins/modules/rax_cdb_database.py deleted file mode 100644 index b0db11814d..0000000000 --- a/plugins/modules/rax_cdb_database.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_cdb_database -short_description: Create / delete a database in the Cloud Databases -description: - - create / delete a database in the Cloud Databases. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - cdb_id: - type: str - description: - - The databases server UUID - required: true - name: - type: str - description: - - Name to give to the database - required: true - character_set: - type: str - description: - - Set of symbols and encodings - default: 'utf8' - collate: - type: str - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: 
rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete', - database=rax_to_dict(database)) - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', 
default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, collate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_cdb_user.py b/plugins/modules/rax_cdb_user.py deleted file mode 100644 index 6ee86c4fe2..0000000000 --- a/plugins/modules/rax_cdb_user.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: Create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - cdb_id: - type: str - description: - - The databases server UUID - required: true - db_username: - type: str - description: - - Name of the database user - required: true - db_password: - type: str - description: - - Database user password - required: true - databases: - type: list - elements: str - description: - - Name of the databases that the user can access - default: [] - host: - type: str - description: - - Specifies the host from which a user is allowed to connect to - the database. 
Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host != host: - changed = True - - 
user.update(password=password, host=host) - - former_dbs = set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - user.grant_user_access(db_names=new_dbs) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', elements='str', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id 
= module.params.get('cdb_id') - name = module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = to_text(module.params.get('host'), errors='surrogate_or_strict') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_clb.py b/plugins/modules/rax_clb.py deleted file mode 100644 index 23c795f395..0000000000 --- a/plugins/modules/rax_clb.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: Create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. 
-attributes: - check_mode: - support: none - diff_mode: - support: none -options: - algorithm: - type: str - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - type: dict - default: {} - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the load balancer - required: true - port: - type: int - description: - - Port for the balancer being created - default: 80 - protocol: - type: str - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - type: int - description: - - timeout for communication between the balancer and the node - default: 30 - type: - type: str - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - type: str - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - wait: - description: - - wait for the balancer to be in state 'running' before returning - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: false - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: true - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, - CLB_PROTOCOLS, - rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - balancer_list = clb.list() - while balancer_list: - retrieved = clb.list(marker=balancer_list.pop().id) - balancer_list.extend(retrieved) - if len(retrieved) < 2: - break - - for balancer in balancer_list: - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.items(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if 
balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(required=True), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) 
- - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_clb_nodes.py b/plugins/modules/rax_clb_nodes.py deleted file mode 100644 index c076dced74..0000000000 --- a/plugins/modules/rax_clb_nodes.py +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: Add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - address: - type: str - required: false - description: - - IP address or domain name of the node - condition: - type: str - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - type: int - required: true - description: - - Load balancer id - node_id: - type: int - required: false - description: - - Node id - port: - type: int - required: false - description: - - Port number of the load balanced service on the node - state: - type: str - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - type: str - required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: false - type: bool - description: - - Wait for the load balancer to become active before returning - wait_timeout: - type: int - required: false - 
default: 30 - description: - - How long to wait before giving up and returning an error - weight: - type: int - required: false - description: - - Weight of node - virtualenv: - type: path - description: - - Virtualenv to execute this module in -author: "Lukasz Kawczynski (@neuroid)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Add a new node to the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: true - credentials: /path/to/credentials - -- name: Drain connections from a node - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: true - credentials: /path/to/credentials - -- name: Remove a node from the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: true - credentials: /path/to/credentials -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module - - -def _activate_virtualenv(path): - activate_this = os.path.join(path, 'bin', 'activate_this.py') - with open(activate_this) as f: - code = compile(f.read(), activate_this, 'exec') - exec(code) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and 
all(match_list): - return node - - return None - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(type='path'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError as e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name in list(mutable): - value = mutable[name] - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % 
- (wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_clb_ssl.py b/plugins/modules/rax_clb_ssl.py deleted file mode 100644 index b794130cfa..0000000000 --- a/plugins/modules/rax_clb_ssl.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_clb_ssl -short_description: Manage SSL termination for a Rackspace Cloud Load Balancer -description: -- Set up, reconfigure, or remove SSL termination for an existing load balancer. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - loadbalancer: - type: str - description: - - Name or ID of the load balancer on which to manage SSL termination. - required: true - state: - type: str - description: - - If set to "present", SSL termination will be added to this load balancer. - - If "absent", SSL termination will be removed instead. - choices: - - present - - absent - default: present - enabled: - description: - - If set to "false", temporarily disable SSL termination without discarding - - existing credentials. - default: true - type: bool - private_key: - type: str - description: - - The private SSL key as a string in PEM format. - certificate: - type: str - description: - - The public SSL certificates as a string in PEM format. - intermediate_certificate: - type: str - description: - - One or more intermediate certificate authorities as a string in PEM - - format, concatenated into a single string. - secure_port: - type: int - description: - - The port to listen for secure traffic. 
- default: 443 - secure_traffic_only: - description: - - If "true", the load balancer will *only* accept secure traffic. - default: false - type: bool - https_redirect: - description: - - If "true", the load balancer will redirect HTTP traffic to HTTPS. - - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL - - termination is also applied or removed. - type: bool - wait: - description: - - Wait for the balancer to be in state "running" before turning. - default: false - type: bool - wait_timeout: - type: int - description: - - How long before "wait" gives up, in seconds. - default: 300 -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Enable SSL termination on a load balancer - community.general.rax_clb_ssl: - loadbalancer: the_loadbalancer - state: present - private_key: "{{ lookup('file', 'credentials/server.key' ) }}" - certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" - intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" - secure_traffic_only: true - wait: true - -- name: Disable SSL termination - community.general.rax_clb_ssl: - loadbalancer: "{{ registered_lb.balancer.id }}" - state: absent - wait: true -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, - certificate, intermediate_certificate, secure_port, - secure_traffic_only, https_redirect, - wait, wait_timeout): - # Validate arguments. 
- - if state == 'present': - if not private_key: - module.fail_json(msg="private_key must be provided.") - else: - private_key = private_key.strip() - - if not certificate: - module.fail_json(msg="certificate must be provided.") - else: - certificate = certificate.strip() - - attempts = wait_timeout // 5 - - # Locate the load balancer. - - balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) - existing_ssl = balancer.get_ssl_termination() - - changed = False - - if state == 'present': - # Apply or reconfigure SSL termination on the load balancer. - ssl_attrs = dict( - securePort=secure_port, - privatekey=private_key, - certificate=certificate, - intermediateCertificate=intermediate_certificate, - enabled=enabled, - secureTrafficOnly=secure_traffic_only - ) - - needs_change = False - - if existing_ssl: - for ssl_attr, value in ssl_attrs.items(): - if ssl_attr == 'privatekey': - # The private key is not included in get_ssl_termination's - # output (as it shouldn't be). Also, if you're changing the - # private key, you'll also be changing the certificate, - # so we don't lose anything by not checking it. - continue - - if value is not None and existing_ssl.get(ssl_attr) != value: - # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) - needs_change = True - else: - needs_change = True - - if needs_change: - try: - balancer.add_ssl_termination(**ssl_attrs) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - elif state == 'absent': - # Remove SSL termination if it's already configured. 
- if existing_ssl: - try: - balancer.delete_ssl_termination() - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if https_redirect is not None and balancer.httpsRedirect != https_redirect: - if changed: - # This wait is unavoidable because load balancers are immutable - # while the SSL termination changes above are being applied. - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - try: - balancer.update(httpsRedirect=https_redirect) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if changed and wait: - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - new_ssl_termination = balancer.get_ssl_termination() - - # Intentionally omit the private key from the module output, so you don't - # accidentally echo it with `ansible-playbook -v` or `debug`, and the - # certificate, which is just long. Convert other attributes to snake_case - # and include https_redirect at the top-level. 
- if new_ssl_termination: - new_ssl = dict( - enabled=new_ssl_termination['enabled'], - secure_port=new_ssl_termination['securePort'], - secure_traffic_only=new_ssl_termination['secureTrafficOnly'] - ) - else: - new_ssl = None - - result = dict( - changed=changed, - https_redirect=balancer.httpsRedirect, - ssl_termination=new_ssl, - balancer=rax_to_dict(balancer, 'clb') - ) - success = True - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - success = False - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - success = False - - if success: - module.exit_json(**result) - else: - module.fail_json(**result) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update(dict( - loadbalancer=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(type='bool', default=True), - private_key=dict(no_log=True), - certificate=dict(), - intermediate_certificate=dict(), - secure_port=dict(type='int', default=443), - secure_traffic_only=dict(type='bool', default=False), - https_redirect=dict(type='bool'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module.') - - loadbalancer = module.params.get('loadbalancer') - state = module.params.get('state') - enabled = module.boolean(module.params.get('enabled')) - private_key = module.params.get('private_key') - certificate = module.params.get('certificate') - intermediate_certificate = module.params.get('intermediate_certificate') - secure_port = module.params.get('secure_port') - secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) - https_redirect = module.boolean(module.params.get('https_redirect')) - wait = 
module.boolean(module.params.get('wait')) - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_load_balancer_ssl( - module, loadbalancer, state, enabled, private_key, certificate, - intermediate_certificate, secure_port, secure_traffic_only, - https_redirect, wait, wait_timeout - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_dns.py b/plugins/modules/rax_dns.py deleted file mode 100644 index 31782cd882..0000000000 --- a/plugins/modules/rax_dns.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - comment: - type: str - description: - - Brief description of the domain. 
Maximum length of 160 characters - email: - type: str - description: - - Email address of the domain administrator - name: - type: str - description: - - Domain name to create - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: false - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - 
rax_dns(module, comment, email, name, state, ttl) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_dns_record.py b/plugins/modules/rax_dns_record.py deleted file mode 100644 index cb3cd279ef..0000000000 --- a/plugins/modules/rax_dns_record.py +++ /dev/null @@ -1,358 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - data: - type: str - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: true - domain: - type: str - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - type: str - description: - - Load Balancer ID to create a PTR record for. Only used with type=PTR - name: - type: str - description: - - FQDN record name to create - required: true - overwrite: - description: - - Add new records if data doesn't match, instead of updating existing - record with matching name. If there are already multiple records with - matching name and overwrite=true, this module will fail. - default: true - type: bool - priority: - type: int - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - type: str - description: - - Server ID to create a PTR record for. 
Only used with type=PTR - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of record in seconds - default: 3600 - type: - type: str - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API." - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied. -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: false - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_find_server, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - overwrite=True, priority=None, record_type='A', - state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - if overwrite: - record = domain.find_record(record_type, name=name) - else: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='overwrite=true and there are multiple matching records') - except pyrax.exceptions.DomainRecordNotFound as e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound as e: - record = {} - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - 
except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - overwrite=dict(type='bool', default=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - overwrite = module.params.get('overwrite') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, overwrite=overwrite, priority=priority, - record_type=record_type, state=state, ttl=ttl) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_facts.py 
b/plugins/modules/rax_facts.py deleted file mode 100644 index f8bb0e0506..0000000000 --- a/plugins/modules/rax_facts.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. -attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix -options: - address: - type: str - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to retrieve facts for - name: - type: str - description: - - Server name to retrieve facts for -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - - community.general.attributes.facts - - community.general.attributes.facts_module - -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: false - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - ansible.builtin.set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_facts(module, address, name, server_id): - changed = False - - 
cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - servers[:] = [server for server in servers if server.status != "DELETED"] - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - supports_check_mode=True, - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_files.py b/plugins/modules/rax_files.py deleted file mode 100644 index a63e107eb4..0000000000 --- a/plugins/modules/rax_files.py +++ /dev/null @@ -1,400 
+0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - Manipulate Rackspace Cloud Files Containers -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. - Selecting this option is only appropriate when setting type=meta - type: bool - default: false - container: - type: str - description: - - The container to use for container or metadata operations. - meta: - type: dict - default: {} - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - type: bool - default: false - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - type: bool - default: false - region: - type: str - description: - - Region to create an instance in - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent', 'list'] - default: present - ttl: - type: int - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - type: str - description: - - Type of object to do work on, i.e. 
metadata object or a container object - choices: - - container - - meta - default: container - web_error: - type: str - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - type: str - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: false - tasks: - - name: "List all containers" - community.general.rax_files: - state: list - - - name: "Create container called 'mycontainer'" - community.general.rax_files: - container: mycontainer - - - name: "Create container 'mycontainer2' with metadata" - community.general.rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - community.general.rax_files: - container: mycontainer - web_index: index.html - - - name: "Set a container's web error page" - community.general.rax_files: - container: mycontainer - web_error: error.html - - - name: "Make container public" - community.general.rax_files: - container: mycontainer - public: true - - - name: "Make container public with a 24 hour TTL" - community.general.rax_files: - container: mycontainer - public: true - ttl: 86400 - - - name: "Make container private" - community.general.rax_files: - container: mycontainer - private: true - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: false - tasks: - - name: "Get mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 
metadata" - community.general.rax_files: - container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError as e: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception as e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and 
private') - - if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer as e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = cf.create_container(container_) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True - - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = 
c.set_web_error_page(web_error) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = 
module.params.get('clear_meta') - typ = module.params.get('type') - ttl = module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_files_objects.py b/plugins/modules/rax_files_objects.py deleted file mode 100644 index bbcdfe4f80..0000000000 --- a/plugins/modules/rax_files_objects.py +++ /dev/null @@ -1,556 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace Cloud Files. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting O(type=meta). - type: bool - default: false - container: - type: str - description: - - The container to use for file object operations. - required: true - dest: - type: str - description: - - The destination of a C(get) operation; i.e. a local directory, C(/home/user/myfolder). 
- Used to specify the destination of an operation on a remote object; i.e. a file name, - V(file1), or a comma-separated list of remote objects, V(file1,file2,file17). - expires: - type: int - description: - - Used to set an expiration in seconds on an uploaded file or folder. - meta: - type: dict - default: {} - description: - - Items to set as metadata values on an uploaded file or folder. - method: - type: str - description: - - > - The method of operation to be performed: V(put) to upload files, V(get) to download files or - V(delete) to remove remote objects in Cloud Files. - choices: - - get - - put - - delete - default: get - src: - type: str - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, V(file1), or a comma-separated list of remote objects, - V(file1,file2,file17). Parameters O(src) and O(dest) are mutually exclusive on remote-only object operations - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. 
Setting to false downloads the contents of a container to a single, - flat directory - type: bool - default: true - type: - type: str - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: false - tasks: - - name: "Get objects from test container" - community.general.rax_files_objects: - container: testcont - dest: ~/Downloads/testcont - - - name: "Get single object from test container" - community.general.rax_files_objects: - container: testcont - src: file1 - dest: ~/Downloads/testcont - - - name: "Get several objects from test container" - community.general.rax_files_objects: - container: testcont - src: file1,file2,file3 - dest: ~/Downloads/testcont - - - name: "Delete one object in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file1 - - - name: "Delete several objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file2,file3,file4 - - - name: "Delete all objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - - - name: "Upload all files to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/onehundred - - - name: "Upload one file to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - community.general.rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - who_uploaded_this: someuser@example.com - - - name: 
"Upload one file to test container with TTL of 60 seconds" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file3 - expires: 60 - - - name: "Attempt to get remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: get - src: FileThatDoesNotExist.jpg - dest: ~/Downloads/testcont - ignore_errors: true - - - name: "Attempt to delete remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: delete - dest: FileThatDoesNotExist.jpg - ignore_errors: true - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file2 - - - name: "Get metadata on several objects" - community.general.rax_files_objects: - container: testcont - type: meta - src: file2,file1 - - - name: "Set metadata on an object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - community.general.rax_files_objects: - container: testcont - type: meta - src: file17 - - - name: "Delete metadata" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - community.general.rax_files_objects: - container: testcont - type: meta -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return 
cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _upload_folder(cf, folder, container, ttl=None, headers=None): - """ Uploads a folder to Cloud Files. - """ - total_bytes = 0 - for root, dummy, files in os.walk(folder): - for fname in files: - full_path = os.path.join(root, fname) - obj_name = os.path.relpath(full_path, folder) - obj_size = os.path.getsize(full_path) - cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) - total_bytes += obj_size - return total_bytes - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to Cloud Files Optionally sets an - metadata, TTL value (expires), or Content-Disposition and Content-Encoding - headers. - """ - if not src: - module.fail_json(msg='src must be specified when uploading') - - c = _get_container(module, cf, container) - src = os.path.abspath(os.path.expanduser(src)) - is_dir = os.path.isdir(src) - - if not is_dir and not os.path.isfile(src) or not os.path.exists(src): - module.fail_json(msg='src must be a file or a directory') - if dest and is_dir: - module.fail_json(msg='dest cannot be set when whole ' - 'directories are uploaded') - - cont_obj = None - total_bytes = 0 - try: - if dest and not is_dir: - cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) - elif is_dir: - total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) - else: - cont_obj = c.upload_file(src, ttl=expires, headers=meta) - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['success'] = True - EXIT_DICT['container'] = c.name - EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) - if cont_obj or total_bytes > 0: - EXIT_DICT['changed'] = True - if meta: - EXIT_DICT['meta'] = dict(updated=True) - - if cont_obj: - EXIT_DICT['bytes'] = cont_obj.total_bytes - EXIT_DICT['etag'] = cont_obj.etag - else: - EXIT_DICT['bytes'] = 
total_bytes - - module.exit_json(**EXIT_DICT) - - -def download(module, cf, container, src, dest, structure): - """ Download objects from Cloud Files to a local path specified by "dest". - Optionally disable maintaining a directory structure by by passing a - false value to "structure". - """ - # Looking for an explicit destination - if not dest: - module.fail_json(msg='dest is a required argument when ' - 'downloading from Cloud Files') - - # Attempt to fetch the container by name - c = _get_container(module, cf, container) - - # Accept a single object name or a comma-separated list of objs - # If not specified, get the entire container - if src: - objs = map(str.strip, src.split(',')) - else: - objs = c.get_object_names() - - dest = os.path.abspath(os.path.expanduser(dest)) - is_dir = os.path.isdir(dest) - - if not is_dir: - module.fail_json(msg='dest must be a directory') - - try: - results = [c.download_object(obj, dest, structure=structure) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - len_results = len(results) - len_objs = len(objs) - - EXIT_DICT['container'] = c.name - EXIT_DICT['requested_downloaded'] = results - if results: - EXIT_DICT['changed'] = True - if len_results == len_objs: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) - else: - EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ - "downloaded" % (len_results, len_objs) - module.exit_json(**EXIT_DICT) - - -def delete(module, cf, container, src, dest): - """ Delete specific objects by proving a single file name or a - comma-separated list to src OR dest (but not both). Omitting file name(s) - assumes the entire container is to be deleted. 
- """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - - c = _get_container(module, cf, container) - - objs = dest or src - if objs: - objs = map(str.strip, objs.split(',')) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - try: - results = [c.delete_object(obj) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - - c = _get_container(module, cf, container) - - objs = dest or src - if objs: - objs = map(str.strip, objs.split(',')) - else: - objs = c.get_object_names() - - try: - results = dict() - for obj in objs: - meta = c.get_object(obj).get_metadata() - results[obj] = dict((k.split(META_PREFIX)[-1], v) for k, v in meta.items()) - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or comma-separated list. 
- Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - objs = dest or src - objs = map(str.strip, objs.split(',')) - - c = _get_container(module, cf, container) - - try: - results = [c.get_object(obj).set_metadata(meta, clear=clear_meta) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = True - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. Deletes on - all objects specified by src or dest (but not both), if any; otherwise it - deletes keys on all objects in the container - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; meta keys to be " - "deleted have been specified on both src and dest" - " args") - objs = dest or src - objs = map(str.strip, objs.split(',')) - - c = _get_container(module, cf, container) - - try: - for obj in objs: - o = c.get_object(obj) - results = [ - o.remove_metadata_key(k) - for k in (meta or o.get_metadata()) - ] - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_deleted'] = len(results) - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, - structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "file": - if method == 'get': - download(module, cf, container, src, dest, structure) - - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) - - if method == 'delete': - delete(module, cf, container, src, dest) - - else: - if method == 'get': - get_meta(module, cf, container, src, dest) - - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) - - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(required=True), - src=dict(), - dest=dict(), - method=dict(default='get', choices=['put', 'get', 'delete']), - type=dict(default='file', choices=['file', 'meta']), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - structure=dict(default=True, type='bool'), - expires=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container = module.params.get('container') - src = module.params.get('src') - dest = module.params.get('dest') - method = module.params.get('method') - typ = module.params.get('type') - meta = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - structure = module.params.get('structure') - expires = module.params.get('expires') - - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_identity.py b/plugins/modules/rax_identity.py deleted file mode 100644 index b2eb156273..0000000000 
--- a/plugins/modules/rax_identity.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present'] - default: present - required: false -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: false - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def cloud_identity(module, state, identity): - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def 
main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if not pyrax.identity: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_keypair.py b/plugins/modules/rax_keypair.py deleted file mode 100644 index d7d7a2cc34..0000000000 --- a/plugins/modules/rax_keypair.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - type: str - description: - - Name of keypair - required: true - public_key: - type: str - description: - - Public Key string to upload. Can be a file path or string - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. 
- - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: false - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: false - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if public_key and os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except Exception: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_meta.py b/plugins/modules/rax_meta.py deleted file mode 100644 index 7b52e906fe..0000000000 --- a/plugins/modules/rax_meta.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - address: - type: str - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to modify metadata for - name: - type: str - description: - - Server name to modify metadata for - meta: - type: dict - default: {} - description: - - A hash of metadata to associate with the instance -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: false - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -import json - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module -from ansible.module_utils.six import string_types - - -def rax_meta(module, address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id 
= module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_mon_alarm.py b/plugins/modules/rax_mon_alarm.py deleted file mode 100644 index b66611a90f..0000000000 --- a/plugins/modules/rax_mon_alarm.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_alarm -short_description: Create or delete a Rackspace Cloud Monitoring alarm -description: -- Create or delete a Rackspace Cloud Monitoring alarm that associates an - existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with - criteria that specify what conditions will trigger which levels of - notifications. Rackspace monitoring module flow | rax_mon_entity -> - rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> - *rax_mon_alarm* -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Ensure that the alarm with this O(label) exists or does not exist. - choices: [ "present", "absent" ] - required: false - default: present - label: - type: str - description: - - Friendly name for this alarm, used to achieve idempotence. Must be a String - between 1 and 255 characters long. - required: true - entity_id: - type: str - description: - - ID of the entity this alarm is attached to. May be acquired by registering - the value of a rax_mon_entity task. - required: true - check_id: - type: str - description: - - ID of the check that should be alerted on. 
May be acquired by registering - the value of a rax_mon_check task. - required: true - notification_plan_id: - type: str - description: - - ID of the notification plan to trigger if this alarm fires. May be acquired - by registering the value of a rax_mon_notification_plan task. - required: true - criteria: - type: str - description: - - Alarm DSL that describes alerting conditions and their output states. Must - be between 1 and 16384 characters long. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html - for a reference on the alerting language. - disabled: - description: - - If yes, create this alarm, but leave it in an inactive state. Defaults to - no. - type: bool - default: false - metadata: - type: dict - description: - - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String - keys and values between 1 and 255 characters long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Alarm example - gather_facts: false - hosts: local - connection: local - tasks: - - name: Ensure that a specific alarm exists. 
- community.general.rax_mon_alarm: - credentials: ~/.rax_pub - state: present - label: uhoh - entity_id: "{{ the_entity['entity']['id'] }}" - check_id: "{{ the_check['check']['id'] }}" - notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" - criteria: > - if (rate(metric['average']) > 10) { - return new AlarmStatus(WARNING); - } - return new AlarmStatus(OK); - register: the_alarm -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, - disabled, metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - if criteria and len(criteria) < 1 or len(criteria) > 16384: - module.fail_json(msg='criteria must be between 1 and 16384 characters long') - - # Coerce attributes. - - changed = False - alarm = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [a for a in cm.list_alarms(entity_id) if a.label == label] - - if existing: - alarm = existing[0] - - if state == 'present': - should_create = False - should_update = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s existing alarms have the label %s.' 
% - (len(existing), label)) - - if alarm: - if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: - should_delete = should_create = True - - should_update = (disabled and disabled != alarm.disabled) or \ - (metadata and metadata != alarm.metadata) or \ - (criteria and criteria != alarm.criteria) - - if should_update and not should_delete: - cm.update_alarm(entity=entity_id, alarm=alarm, - criteria=criteria, disabled=disabled, - label=label, metadata=metadata) - changed = True - - if should_delete: - alarm.delete() - changed = True - else: - should_create = True - - if should_create: - alarm = cm.create_alarm(entity=entity_id, check=check_id, - notification_plan=notification_plan_id, - criteria=criteria, disabled=disabled, label=label, - metadata=metadata) - changed = True - else: - for a in existing: - a.delete() - changed = True - - if alarm: - alarm_dict = { - "id": alarm.id, - "label": alarm.label, - "check_id": alarm.check_id, - "notification_plan_id": alarm.notification_plan_id, - "criteria": alarm.criteria, - "disabled": alarm.disabled, - "metadata": alarm.metadata - } - module.exit_json(changed=changed, alarm=alarm_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - entity_id=dict(required=True), - check_id=dict(required=True), - notification_plan_id=dict(required=True), - criteria=dict(), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - entity_id = module.params.get('entity_id') - check_id = module.params.get('check_id') - notification_plan_id = 
module.params.get('notification_plan_id') - criteria = module.params.get('criteria') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - alarm(module, state, label, entity_id, check_id, notification_plan_id, - criteria, disabled, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_mon_check.py b/plugins/modules/rax_mon_check.py deleted file mode 100644 index 253c26dcf5..0000000000 --- a/plugins/modules/rax_mon_check.py +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_check -short_description: Create or delete a Rackspace Cloud Monitoring check for an - existing entity. -description: -- Create or delete a Rackspace Cloud Monitoring check associated with an - existing rax_mon_entity. A check is a specific test or measurement that is - performed, possibly from different monitoring zones, on the systems you - monitor. Rackspace monitoring module flow | rax_mon_entity -> - *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> - rax_mon_alarm -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Ensure that a check with this O(label) exists or does not exist. - choices: ["present", "absent"] - default: present - entity_id: - type: str - description: - - ID of the rax_mon_entity to target with this check. - required: true - label: - type: str - description: - - Defines a label for this check, between 1 and 64 characters long. - required: true - check_type: - type: str - description: - - The type of check to create. 
C(remote.) checks may be created on any - rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities - that have a non-null C(agent_id). - - | - Choices for this option are: - - V(remote.dns) - - V(remote.ftp-banner) - - V(remote.http) - - V(remote.imap-banner) - - V(remote.mssql-banner) - - V(remote.mysql-banner) - - V(remote.ping) - - V(remote.pop3-banner) - - V(remote.postgresql-banner) - - V(remote.smtp-banner) - - V(remote.smtp) - - V(remote.ssh) - - V(remote.tcp) - - V(remote.telnet-banner) - - V(agent.filesystem) - - V(agent.memory) - - V(agent.load_average) - - V(agent.cpu) - - V(agent.disk) - - V(agent.network) - - V(agent.plugin) - required: true - monitoring_zones_poll: - type: str - description: - - Comma-separated list of the names of the monitoring zones the check should - run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, - mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. - target_hostname: - type: str - description: - - One of O(target_hostname) and O(target_alias) is required for remote.* checks, - but prohibited for agent.* checks. The hostname this check should target. - Must be a valid IPv4, IPv6, or FQDN. - target_alias: - type: str - description: - - One of O(target_alias) and O(target_hostname) is required for remote.* checks, - but prohibited for agent.* checks. Use the corresponding key in the entity's - C(ip_addresses) hash to resolve an IP address to target. - details: - type: dict - default: {} - description: - - Additional details specific to the check type. Must be a hash of strings - between 1 and 255 characters long, or an array or object containing 0 to - 256 items. - disabled: - description: - - If V(true), ensure the check is created, but don't actually use it yet. - type: bool - default: false - metadata: - type: dict - default: {} - description: - - Hash of arbitrary key-value pairs to accompany this check if it fires. 
- Keys and values must be strings between 1 and 255 characters long. - period: - type: int - description: - - The number of seconds between each time the check is performed. Must be - greater than the minimum period set on your account. - timeout: - type: int - description: - - The number of seconds this check will wait when attempting to collect - results. Must be less than the period. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Create a monitoring check - gather_facts: false - hosts: local - connection: local - tasks: - - name: Associate a check with an existing entity. - community.general.rax_mon_check: - credentials: ~/.rax_pub - state: present - entity_id: "{{ the_entity['entity']['id'] }}" - label: the_check - check_type: remote.ping - monitoring_zones_poll: mziad,mzord,mzdfw - details: - count: 10 - meta: - hurf: durf - register: the_check -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout): - - # Coerce attributes. - - if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): - monitoring_zones_poll = [monitoring_zones_poll] - - if period: - period = int(period) - - if timeout: - timeout = int(timeout) - - changed = False - check = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. 
This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - entity = cm.get_entity(entity_id) - if not entity: - module.fail_json(msg='Failed to instantiate entity. "%s" may not be' - ' a valid entity id.' % entity_id) - - existing = [e for e in entity.list_checks() if e.label == label] - - if existing: - check = existing[0] - - if state == 'present': - if len(existing) > 1: - module.fail_json(msg='%s existing checks have a label of %s.' % - (len(existing), label)) - - should_delete = False - should_create = False - should_update = False - - if check: - # Details may include keys set to default values that are not - # included in the initial creation. - # - # Only force a recreation of the check if one of the *specified* - # keys is missing or has a different value. - if details: - for (key, value) in details.items(): - if key not in check.details: - should_delete = should_create = True - elif value != check.details[key]: - should_delete = should_create = True - - should_update = label != check.label or \ - (target_hostname and target_hostname != check.target_hostname) or \ - (target_alias and target_alias != check.target_alias) or \ - (disabled != check.disabled) or \ - (metadata and metadata != check.metadata) or \ - (period and period != check.period) or \ - (timeout and timeout != check.timeout) or \ - (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) - - if should_update and not should_delete: - check.update(label=label, - disabled=disabled, - metadata=metadata, - monitoring_zones_poll=monitoring_zones_poll, - timeout=timeout, - period=period, - target_alias=target_alias, - target_hostname=target_hostname) - changed = True - else: - # The check doesn't exist yet. 
- should_create = True - - if should_delete: - check.delete() - - if should_create: - check = cm.create_check(entity, - label=label, - check_type=check_type, - target_hostname=target_hostname, - target_alias=target_alias, - monitoring_zones_poll=monitoring_zones_poll, - details=details, - disabled=disabled, - metadata=metadata, - period=period, - timeout=timeout) - changed = True - elif state == 'absent': - if check: - check.delete() - changed = True - else: - module.fail_json(msg='state must be either present or absent.') - - if check: - check_dict = { - "id": check.id, - "label": check.label, - "type": check.type, - "target_hostname": check.target_hostname, - "target_alias": check.target_alias, - "monitoring_zones_poll": check.monitoring_zones_poll, - "details": check.details, - "disabled": check.disabled, - "metadata": check.metadata, - "period": check.period, - "timeout": check.timeout - } - module.exit_json(changed=changed, check=check_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - entity_id=dict(required=True), - label=dict(required=True), - check_type=dict(required=True), - monitoring_zones_poll=dict(), - target_hostname=dict(), - target_alias=dict(), - details=dict(type='dict', default={}), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict', default={}), - period=dict(type='int'), - timeout=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - entity_id = module.params.get('entity_id') - label = module.params.get('label') - check_type = module.params.get('check_type') - monitoring_zones_poll = module.params.get('monitoring_zones_poll') - target_hostname = module.params.get('target_hostname') - target_alias = 
module.params.get('target_alias') - details = module.params.get('details') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - period = module.params.get('period') - timeout = module.params.get('timeout') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_mon_entity.py b/plugins/modules/rax_mon_entity.py deleted file mode 100644 index fbad9f98fc..0000000000 --- a/plugins/modules/rax_mon_entity.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_entity -short_description: Create or delete a Rackspace Cloud Monitoring entity -description: -- Create or delete a Rackspace Cloud Monitoring entity, which represents a device - to monitor. Entities associate checks and alarms with a target system and - provide a convenient, centralized place to store IP addresses. Rackspace - monitoring module flow | *rax_mon_entity* -> rax_mon_check -> - rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - label: - type: str - description: - - Defines a name for this entity. Must be a non-empty string between 1 and - 255 characters long. - required: true - state: - type: str - description: - - Ensure that an entity with this C(name) exists or does not exist. 
- choices: ["present", "absent"] - default: present - agent_id: - type: str - description: - - Rackspace monitoring agent on the target device to which this entity is - bound. Necessary to collect C(agent.) rax_mon_checks against this entity. - named_ip_addresses: - type: dict - default: {} - description: - - Hash of IP addresses that may be referenced by name by rax_mon_checks - added to this entity. Must be a dictionary of with keys that are names - between 1 and 64 characters long, and values that are valid IPv4 or IPv6 - addresses. - metadata: - type: dict - default: {} - description: - - Hash of arbitrary C(name), C(value) pairs that are passed to associated - rax_mon_alarms. Names and values must all be between 1 and 255 characters - long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Entity example - gather_facts: false - hosts: local - connection: local - tasks: - - name: Ensure an entity exists - community.general.rax_mon_entity: - credentials: ~/.rax_pub - state: present - label: my_entity - named_ip_addresses: - web_box: 192.0.2.4 - db_box: 192.0.2.5 - meta: - hurf: durf - register: the_entity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, - metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. 
This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for entity in cm.list_entities(): - if label == entity.label: - existing.append(entity) - - entity = None - - if existing: - entity = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing entities have the label %s.' % - (len(existing), label)) - - if entity: - if named_ip_addresses and named_ip_addresses != entity.ip_addresses: - should_delete = should_create = True - - # Change an existing Entity, unless there's nothing to do. - should_update = agent_id and agent_id != entity.agent_id or \ - (metadata and metadata != entity.metadata) - - if should_update and not should_delete: - entity.update(agent_id, metadata) - changed = True - - if should_delete: - entity.delete() - else: - should_create = True - - if should_create: - # Create a new Entity. - entity = cm.create_entity(label=label, agent=agent_id, - ip_addresses=named_ip_addresses, - metadata=metadata) - changed = True - else: - # Delete the existing Entities. 
- for e in existing: - e.delete() - changed = True - - if entity: - entity_dict = { - "id": entity.id, - "name": entity.name, - "agent_id": entity.agent_id, - } - module.exit_json(changed=changed, entity=entity_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - agent_id=dict(), - named_ip_addresses=dict(type='dict', default={}), - metadata=dict(type='dict', default={}) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - agent_id = module.params.get('agent_id') - named_ip_addresses = module.params.get('named_ip_addresses') - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_mon_notification.py b/plugins/modules/rax_mon_notification.py deleted file mode 100644 index 7539f2a378..0000000000 --- a/plugins/modules/rax_mon_notification.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification -short_description: Create or delete a Rackspace Cloud Monitoring notification -description: -- Create or delete a Rackspace Cloud Monitoring notification that specifies a - channel that can be used to communicate alarms, such as email, webhooks, or - 
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> - *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Ensure that the notification with this O(label) exists or does not exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification. String between 1 and 255 - characters long. - required: true - notification_type: - type: str - description: - - A supported notification type. - choices: ["webhook", "email", "pagerduty"] - required: true - details: - type: dict - description: - - Dictionary of key-value pairs used to initialize the notification. - Required keys and meanings vary with notification type. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ - service-notification-types-crud.html for details. - required: true -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Monitoring notification example - gather_facts: false - hosts: local - connection: local - tasks: - - name: Email me when something goes wrong. 
- rax_mon_entity: - credentials: ~/.rax_pub - label: omg - type: email - details: - address: me@mailhost.com - register: the_notification -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification(module, state, label, notification_type, details): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notifications(): - if n.label == label: - existing.append(n) - - if existing: - notification = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing notifications are labelled %s.' 
% - (len(existing), label)) - - if notification: - should_delete = (notification_type != notification.type) - - should_update = (details != notification.details) - - if should_update and not should_delete: - notification.update(details=notification.details) - changed = True - - if should_delete: - notification.delete() - else: - should_create = True - - if should_create: - notification = cm.create_notification(notification_type, - label=label, details=details) - changed = True - else: - for n in existing: - n.delete() - changed = True - - if notification: - notification_dict = { - "id": notification.id, - "type": notification.type, - "label": notification.label, - "details": notification.details - } - module.exit_json(changed=changed, notification=notification_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), - details=dict(required=True, type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - notification_type = module.params.get('notification_type') - details = module.params.get('details') - - setup_rax_module(module, pyrax) - - notification(module, state, label, notification_type, details) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_mon_notification_plan.py b/plugins/modules/rax_mon_notification_plan.py deleted file mode 100644 index 31647304b9..0000000000 --- a/plugins/modules/rax_mon_notification_plan.py +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification_plan -short_description: Create or delete a Rackspace Cloud Monitoring notification - plan. -description: -- Create or delete a Rackspace Cloud Monitoring notification plan by - associating existing rax_mon_notifications with severity levels. Rackspace - monitoring module flow | rax_mon_entity -> rax_mon_check -> - rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Ensure that the notification plan with this O(label) exists or does not - exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification plan. String between 1 and - 255 characters long. - required: true - critical_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is CRITICAL. Must be an - array of valid rax_mon_notification ids. - warning_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is WARNING. Must be an array - of valid rax_mon_notification ids. - ok_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is OK. Must be an array of - valid rax_mon_notification ids. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Example notification plan - gather_facts: false - hosts: local - connection: local - tasks: - - name: Establish who gets called when. 
- community.general.rax_mon_notification_plan: - credentials: ~/.rax_pub - state: present - label: defcon1 - critical_state: - - "{{ everyone['notification']['id'] }}" - warning_state: - - "{{ opsfloor['notification']['id'] }}" - register: defcon1 -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification_plan(module, state, label, critical_state, warning_state, ok_state): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification_plan = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notification_plans(): - if n.label == label: - existing.append(n) - - if existing: - notification_plan = existing[0] - - if state == 'present': - should_create = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s notification plans are labelled %s.' 
% - (len(existing), label)) - - if notification_plan: - should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ - (warning_state and warning_state != notification_plan.warning_state) or \ - (ok_state and ok_state != notification_plan.ok_state) - - if should_delete: - notification_plan.delete() - should_create = True - else: - should_create = True - - if should_create: - notification_plan = cm.create_notification_plan(label=label, - critical_state=critical_state, - warning_state=warning_state, - ok_state=ok_state) - changed = True - else: - for np in existing: - np.delete() - changed = True - - if notification_plan: - notification_plan_dict = { - "id": notification_plan.id, - "critical_state": notification_plan.critical_state, - "warning_state": notification_plan.warning_state, - "ok_state": notification_plan.ok_state, - "metadata": notification_plan.metadata - } - module.exit_json(changed=changed, notification_plan=notification_plan_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - critical_state=dict(type='list', elements='str'), - warning_state=dict(type='list', elements='str'), - ok_state=dict(type='list', elements='str'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - critical_state = module.params.get('critical_state') - warning_state = module.params.get('warning_state') - ok_state = module.params.get('ok_state') - - setup_rax_module(module, pyrax) - - notification_plan(module, state, label, critical_state, warning_state, ok_state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_network.py 
b/plugins/modules/rax_network.py deleted file mode 100644 index 22f148366e..0000000000 --- a/plugins/modules/rax_network.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_network -short_description: Create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - type: str - description: - - Label (name) to give the network - required: true - cidr: - type: str - description: - - cidr of the network being created -author: - - "Christopher H. Laco (@claco)" - - "Jesse Keating (@omgjlk)" -extends_documentation_fragment: - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: false - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_network(module, state, label, cidr): - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not cidr: - module.fail_json(msg='missing required arguments: cidr') - - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(required=True), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_queue.py b/plugins/modules/rax_queue.py deleted file mode 100644 index 00f730b279..0000000000 --- a/plugins/modules/rax_queue.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: 
GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: Create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - type: str - description: - - Name to give the queue - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: false - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, state, name) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_scaling_group.py b/plugins/modules/rax_scaling_group.py deleted file mode 100644 index f4bb790255..0000000000 --- a/plugins/modules/rax_scaling_group.py +++ /dev/null @@ -1,441 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud 
Autoscale Groups -description: - - Manipulate Rackspace Cloud Autoscale Groups -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: false - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified, it will fallback to V(auto). - choices: - - auto - - manual - files: - type: dict - default: {} - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - flavor: - type: str - description: - - flavor to use for the instance - required: true - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name). - required: true - key_name: - type: str - description: - - key pair to use on the instance - loadbalancers: - type: list - elements: dict - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - type: int - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - type: dict - default: {} - description: - - A hash of metadata to associate with the instance - min_entities: - type: int - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - type: str - description: - - Name to give the scaling group - required: true - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). 
- default: - - public - - private - server_name: - type: str - description: - - The base name for servers created by Autoscale - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - O(config_drive). Can be a file path or a string - wait: - description: - - wait for the scaling group to finish provisioning the minimum amount of - servers - type: bool - default: false - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -import base64 -import json -import os -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import ( - rax_argument_spec, rax_find_image, rax_find_network, - rax_required_together, rax_to_dict, setup_rax_module, - rax_scaling_group_personality_file, -) -from ansible.module_utils.six import string_types - - -def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, - image=None, key_name=None, loadbalancers=None, meta=None, - min_entities=0, max_entities=0, name=None, networks=None, - server_name=None, state='present', user_data=None, - 
config_drive=False, wait=True, wait_timeout=300): - files = {} if files is None else files - loadbalancers = [] if loadbalancers is None else loadbalancers - meta = {} if meta is None else meta - networks = [] if networks is None else networks - - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate clients. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = rax_scaling_group_personality_file(module, files) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - try: - lb_id = int(lb.get('id')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer ID is not an integer: ' - '%s' % lb.get('id')) - try: - port = int(lb.get('port')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer port is not an ' - 'integer: %s' % lb.get('port')) - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except 
pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=personality, - networks=nics, load_balancers=lbs, - key_name=key_name, config_drive=config_drive, - user_data=user_data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if not changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['server_name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - disk_config = disk_config or 'AUTO' - if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config', 'AUTO')): - lc_args['disk_config'] = disk_config - - if (meta or lc.get('meta')) and meta != lc.get('metadata'): - lc_args['metadata'] = meta - - test_personality = [] - for p in personality: - test_personality.append({ - 'path': p['path'], - 'contents': base64.b64encode(p['contents']) - }) - if ((test_personality or lc.get('personality')) and - test_personality != lc.get('personality')): - lc_args['personality'] = personality - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) 
- - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if config_drive != lc.get('config_drive', False): - lc_args['config_drive'] = config_drive - - if (user_data and - base64.b64encode(user_data) != lc.get('user_data')): - lc_args['user_data'] = user_data - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - state = sg.get_state() - if state["pending_capacity"] == 0: - break - - time.sleep(5) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound as e: - sg = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - config_drive=dict(default=False, type='bool'), - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='dict', default={}), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - loadbalancers=dict(type='list', elements='dict'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', elements='str', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - 
required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - config_drive = module.params.get('config_drive') - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - user_data = module.params.get('user_data') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - files=files, flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state, config_drive=config_drive, user_data=user_data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rax_scaling_policy.py b/plugins/modules/rax_scaling_policy.py deleted file mode 100644 index 2869a69105..0000000000 --- a/plugins/modules/rax_scaling_policy.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# 
SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - at: - type: str - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - V(2013-05-19T08:07:08Z) - change: - type: int - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - O(is_percent) to V(true) also. - cron: - type: str - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to V(1 0 * * *). - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - desired_capacity: - type: int - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in O(change) is a percent value - default: false - type: bool - name: - type: str - description: - - Name to give the policy - required: true - policy_type: - type: str - description: - - The type of policy that will be executed for the current release. 
- choices: - - webhook - - schedule - required: true - scaling_group: - type: str - description: - - Name of the scaling group that this policy will be added to - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -extends_documentation_fragment: - - community.general.rackspace - - community.general.rackspace.openstack - - community.general.attributes - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - UUID(scaling_group) - except ValueError: - try: - sg = au.find(name=scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - try: - sg = au.get(scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if state == 'present': - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - if at: - args = dict(at=at) - elif cron: - args = dict(cron=cron) - else: - args = None - - if not policies: - try: - policy = sg.add_policy(name, policy_type=policy_type, - cooldown=cooldown, change=change, - is_percent=is_percent, - desired_capacity=desired_capacity, - args=args) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - else: - policy = policies[0] - kwargs = {} - if policy_type != policy.type: - kwargs['policy_type'] = policy_type - - if cooldown != policy.cooldown: - kwargs['cooldown'] = cooldown - - if hasattr(policy, 'change') and change != policy.change: - kwargs['change'] = change - - if hasattr(policy, 'changePercent') and is_percent is False: - kwargs['change'] = change - kwargs['is_percent'] = False - elif hasattr(policy, 'change') and is_percent is True: - kwargs['change'] = change - kwargs['is_percent'] = True - - if hasattr(policy, 'desiredCapacity') and change: - kwargs['change'] = change - elif ((hasattr(policy, 'change') or - hasattr(policy, 'changePercent')) and desired_capacity): - kwargs['desired_capacity'] = desired_capacity - - if hasattr(policy, 'args') and args != policy.args: - kwargs['args'] = args - - if kwargs: - policy.update(**kwargs) - changed = True - - policy.get() - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - else: - try: - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - 
module.fail_json(msg='No unique policy match found by name') - elif not policies: - policy = {} - else: - policy.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - at=dict(), - change=dict(type='int'), - cron=dict(), - cooldown=dict(type='int', default=300), - desired_capacity=dict(type='int'), - is_percent=dict(type='bool', default=False), - name=dict(required=True), - policy_type=dict(required=True, choices=['webhook', 'schedule']), - scaling_group=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['cron', 'at'], - ['change', 'desired_capacity'], - ] - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - at = module.params.get('at') - change = module.params.get('change') - cron = module.params.get('cron') - cooldown = module.params.get('cooldown') - desired_capacity = module.params.get('desired_capacity') - is_percent = module.params.get('is_percent') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - scaling_group = module.params.get('scaling_group') - state = module.params.get('state') - - if (at or cron) and policy_type == 'webhook': - module.fail_json(msg='policy_type=schedule is required for a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index 06224235a8..d351e7c1d8 
100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -109,9 +109,10 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this param is C(10) but that is being deprecated - and it will be replaced with C(60) in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) + in community.general 9.0.0. type: int + default: 60 boot_override_mode: description: - Boot mode when using an override. @@ -805,7 +806,7 @@ def main(): update_username=dict(type='str', aliases=["account_updatename"]), account_properties=dict(type='dict', default={}), bootdevice=dict(), - timeout=dict(type='int'), + timeout=dict(type='int', default=60), uefi_target=dict(), boot_next=dict(), boot_override_mode=dict(choices=['Legacy', 'UEFI']), @@ -854,16 +855,6 @@ def main(): supports_check_mode=False ) - if module.params['timeout'] is None: - timeout = 10 - module.deprecate( - 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format( - 10, 60 - ), - version='9.0.0', - collection_name='community.general' - ) - category = module.params['category'] command_list = module.params['command'] diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 1fea9e7cd1..129b33b2e6 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -64,9 +64,10 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this param is C(10) but that is being deprecated - and it will be replaced with C(60) in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) + in community.general 9.0.0. 
type: int + default: 60 boot_order: required: false description: @@ -384,7 +385,7 @@ def main(): password=dict(no_log=True), auth_token=dict(no_log=True), bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int'), + timeout=dict(type='int', default=60), boot_order=dict(type='list', elements='str', default=[]), network_protocols=dict( type='dict', @@ -418,16 +419,6 @@ def main(): supports_check_mode=False ) - if module.params['timeout'] is None: - timeout = 10 - module.deprecate( - 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format( - 10, 60 - ), - version='9.0.0', - collection_name='community.general' - ) - category = module.params['category'] command_list = module.params['command'] diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index 0b39bb6fa8..3b594b7a2c 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -63,9 +63,10 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this param is C(10) but that is being deprecated - and it will be replaced with C(60) in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) + in community.general 9.0.0. 
type: int + default: 60 update_handle: required: false description: @@ -407,7 +408,7 @@ def main(): username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int'), + timeout=dict(type='int', default=60), update_handle=dict(), manager=dict(), ), @@ -423,16 +424,6 @@ def main(): supports_check_mode=True, ) - if module.params['timeout'] is None: - timeout = 10 - module.deprecate( - 'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format( - 10, 60 - ), - version='9.0.0', - collection_name='community.general' - ) - # admin credentials used for authentication creds = {'user': module.params['username'], 'pswd': module.params['password'], diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py index d4b47d5d50..4a7aac483e 100644 --- a/plugins/modules/redhat_subscription.py +++ b/plugins/modules/redhat_subscription.py @@ -123,10 +123,9 @@ options: description: - Upon successful registration, auto-consume available subscriptions - | - Please note that the alias O(autosubscribe) will be removed in + Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0. 
type: bool - aliases: [autosubscribe] activationkey: description: - supply an activation key for use with registration @@ -1106,17 +1105,7 @@ def main(): 'server_port': {}, 'rhsm_baseurl': {}, 'rhsm_repo_ca_cert': {}, - 'auto_attach': { - 'type': 'bool', - 'aliases': ['autosubscribe'], - 'deprecated_aliases': [ - { - 'name': 'autosubscribe', - 'version': '9.0.0', - 'collection_name': 'community.general', - }, - ], - }, + 'auto_attach': {'type': 'bool'}, 'activationkey': {'no_log': True}, 'org_id': {}, 'environment': {}, diff --git a/plugins/modules/stackdriver.py b/plugins/modules/stackdriver.py deleted file mode 100644 index 35b2b0dc16..0000000000 --- a/plugins/modules/stackdriver.py +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: stackdriver -short_description: Send code deploy and annotation events to stackdriver -description: - - Send code deploy and annotation events to Stackdriver -author: "Ben Whaley (@bwhaley)" -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - key: - type: str - description: - - API key. - required: true - event: - type: str - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: true - revision_id: - type: str - description: - - The revision of the code that was deployed. 
Required for deploy events - deployed_by: - type: str - description: - - The person or robot responsible for deploying the code - default: "Ansible" - deployed_to: - type: str - description: - - "The environment code was deployed to. (ie: development, staging, production)" - repository: - type: str - description: - - The repository (or project) deployed - msg: - type: str - description: - - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. - annotated_by: - type: str - description: - - The person or robot who the annotation should be attributed to. - default: "Ansible" - level: - type: str - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. - choices: ['INFO', 'WARN', 'ERROR'] - default: 'INFO' - instance_id: - type: str - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - event_epoch: - type: str - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." -''' - -EXAMPLES = ''' -- name: Send a code deploy event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: deploy - deployed_to: production - deployed_by: leeroyjenkins - repository: MyWebApp - revision_id: abcd123 - -- name: Send an annotation event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: annotation - msg: Greetings from Ansible - annotated_by: leeroyjenkins - level: WARN - instance_id: i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( # @TODO add types - key=dict(required=True, no_log=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), # @TODO int? - deployed_by=dict(default='Ansible'), - deployed_to=dict(), - repository=dict(), - ), - supports_check_mode=True - ) - - key = module.params["key"] - event = module.params["event"] - - # Annotation params - msg = module.params["msg"] - annotated_by = module.params["annotated_by"] - level = module.params["level"] - instance_id = module.params["instance_id"] - event_epoch = module.params["event_epoch"] - - # Deploy params - revision_id = module.params["revision_id"] - deployed_by = module.params["deployed_by"] - deployed_to = module.params["deployed_to"] - repository = module.params["repository"] - - ################################################################## - # deploy requires revision_id - # annotation requires msg - # We verify these manually - ################################################################## - - if event == 'deploy': - if not revision_id: - module.fail_json(msg="revision_id required for deploy events") - try: - send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception as e: - module.fail_json(msg="unable to sent deploy event: %s" % to_native(e), - exception=traceback.format_exc()) - - if event == 'annotation': - if not msg: - module.fail_json(msg="msg required for annotation events") - try: - send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception as e: - module.fail_json(msg="unable to sent annotation event: %s" % to_native(e), - exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, deployed_by=deployed_by) - - -if __name__ == 
'__main__': - main() diff --git a/plugins/modules/webfaction_app.py b/plugins/modules/webfaction_app.py deleted file mode 100644 index 81bfc8b686..0000000000 --- a/plugins/modules/webfaction_app.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Create a Webfaction application using Ansible and the Webfaction API -# -# Valid application types can be found by looking here: -# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: webfaction_app -short_description: Add or remove applications on a Webfaction host -description: - - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as - your host, you may want to add C(serial=1) to the plays. - - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info. 
- -extends_documentation_fragment: - - community.general.attributes - -attributes: - check_mode: - support: full - diff_mode: - support: none - -options: - name: - description: - - The name of the application - required: true - type: str - - state: - description: - - Whether the application should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. - required: true - type: str - - autostart: - description: - - Whether the app should restart with an C(autostart.cgi) script - type: bool - default: false - - extra_info: - description: - - Any extra parameters required by the app - default: '' - type: str - - port_open: - description: - - IF the port should be opened - type: bool - default: false - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str - -''' - -EXAMPLES = ''' - - name: Create a test app - community.general.webfaction_app: - name: "my_wsgi_app1" - state: present - type: mod_wsgi35-python27 - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - type=dict(required=True), - autostart=dict(type='bool', default=False), - extra_info=dict(default=""), - port_open=dict(type='bool', default=False), - login_name=dict(required=True), - 
login_password=dict(required=True, no_log=True), - machine=dict(), - ), - supports_check_mode=True - ) - app_name = module.params['name'] - app_type = module.params['type'] - app_state = module.params['state'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - app_list = webfaction.list_apps(session_id) - app_map = dict([(i['name'], i) for i in app_list]) - existing_app = app_map.get(app_name) - - result = {} - - # Here's where the real stuff happens - - if app_state == 'present': - - # Does an app with this name already exist? - if existing_app: - if existing_app['type'] != app_type: - module.fail_json(msg="App already exists with different type. Please fix by hand.") - - # If it exists with the right type, we don't change it - # Should check other parameters. - module.exit_json( - changed=False, - result=existing_app, - ) - - if not module.check_mode: - # If this isn't a dry run, create the app - result.update( - webfaction.create_app( - session_id, app_name, app_type, - module.boolean(module.params['autostart']), - module.params['extra_info'], - module.boolean(module.params['port_open']) - ) - ) - - elif app_state == 'absent': - - # If the app's already not there, nothing changed. 
- if not existing_app: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_app(session_id, app_name) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(app_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/webfaction_db.py b/plugins/modules/webfaction_db.py deleted file mode 100644 index 5428de5b63..0000000000 --- a/plugins/modules/webfaction_db.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Create a webfaction database using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: webfaction_db -short_description: Add or remove a database on Webfaction -description: - - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as - your host, you may want to add C(serial=1) to the plays. 
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - - name: - description: - - The name of the database - required: true - type: str - - state: - description: - - Whether the database should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of database to create. - required: true - choices: ['mysql', 'postgresql'] - type: str - - password: - description: - - The password for the new database user. - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str -''' - -EXAMPLES = ''' - # This will also create a default DB user with the same - # name as the database, and the specified password. - - - name: Create a database - community.general.webfaction_db: - name: "{{webfaction_user}}_db1" - password: mytestsql - type: mysql - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" - - # Note that, for symmetry's sake, deleting a database using - # 'state: absent' will also delete the matching user. - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. 
- type=dict(required=True, choices=['mysql', 'postgresql']), - password=dict(no_log=True), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(), - ), - supports_check_mode=True - ) - db_name = module.params['name'] - db_state = module.params['state'] - db_type = module.params['type'] - db_passwd = module.params['password'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - db_list = webfaction.list_dbs(session_id) - db_map = dict([(i['name'], i) for i in db_list]) - existing_db = db_map.get(db_name) - - user_list = webfaction.list_db_users(session_id) - user_map = dict([(i['username'], i) for i in user_list]) - existing_user = user_map.get(db_name) - - result = {} - - # Here's where the real stuff happens - - if db_state == 'present': - - # Does a database with this name already exist? - if existing_db: - # Yes, but of a different type - fail - if existing_db['db_type'] != db_type: - module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") - - # If it exists with the right type, we don't change anything. - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, create the db - # and default user. - result.update( - webfaction.create_db( - session_id, db_name, db_type, db_passwd - ) - ) - - elif db_state == 'absent': - - # If this isn't a dry run... 
- if not module.check_mode: - - if not (existing_db or existing_user): - module.exit_json(changed=False,) - - if existing_db: - # Delete the db if it exists - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) - - if existing_user: - # Delete the default db user if it exists - result.update( - webfaction.delete_db_user(session_id, db_name, db_type) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(db_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/webfaction_domain.py b/plugins/modules/webfaction_domain.py deleted file mode 100644 index 4c87a539a8..0000000000 --- a/plugins/modules/webfaction_domain.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Create Webfaction domains and subdomains using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: webfaction_domain -short_description: Add or remove domains and subdomains on Webfaction -description: - - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - If you are I(deleting) domains by using O(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. - If you do not specify subdomains, the domain will be deleted. 
- - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as - your host, you may want to add C(serial=1) to the plays. - - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info. - -extends_documentation_fragment: - - community.general.attributes - -attributes: - check_mode: - support: full - diff_mode: - support: none - -options: - - name: - description: - - The name of the domain - required: true - type: str - - state: - description: - - Whether the domain should exist - choices: ['present', 'absent'] - default: "present" - type: str - - subdomains: - description: - - Any subdomains to create. - default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a test domain - community.general.webfaction_domain: - name: mydomain.com - state: present - subdomains: - - www - - blog - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - - - name: Delete test domain and any subdomains - community.general.webfaction_domain: - name: mydomain.com - state: absent - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - subdomains=dict(default=[], type='list', elements='str'), - login_name=dict(required=True), - 
login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - domain_name = module.params['name'] - domain_state = module.params['state'] - domain_subdomains = module.params['subdomains'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - domain_list = webfaction.list_domains(session_id) - domain_map = dict([(i['domain'], i) for i in domain_list]) - existing_domain = domain_map.get(domain_name) - - result = {} - - # Here's where the real stuff happens - - if domain_state == 'present': - - # Does an app with this name already exist? - if existing_domain: - - if set(existing_domain['subdomains']) >= set(domain_subdomains): - # If it exists with the right subdomains, we don't change anything. - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args - result.update( - webfaction.create_domain( - *positional_args - ) - ) - - elif domain_state == 'absent': - - # If the app's already not there, nothing changed. 
- if not existing_domain: - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_domain(*positional_args) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(domain_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/webfaction_mailbox.py b/plugins/modules/webfaction_mailbox.py deleted file mode 100644 index 119dfd283f..0000000000 --- a/plugins/modules/webfaction_mailbox.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2015, Quentin Stafford-Fraser and Andy Baker -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Create webfaction mailbox using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: webfaction_mailbox -short_description: Add or remove mailboxes on Webfaction -description: - - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as - your host, you may want to add C(serial=1) to the plays. 
- - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info. - -extends_documentation_fragment: - - community.general.attributes - -attributes: - check_mode: - support: full - diff_mode: - support: none - -options: - - mailbox_name: - description: - - The name of the mailbox - required: true - type: str - - mailbox_password: - description: - - The password for the mailbox - required: true - type: str - - state: - description: - - Whether the mailbox should exist - choices: ['present', 'absent'] - default: "present" - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a mailbox - community.general.webfaction_mailbox: - mailbox_name="mybox" - mailbox_password="myboxpw" - state=present - login_name={{webfaction_user}} - login_password={{webfaction_passwd}} -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - mailbox_name=dict(required=True), - mailbox_password=dict(required=True, no_log=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - - mailbox_name = module.params['mailbox_name'] - site_state = module.params['state'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] - existing_mailbox = mailbox_name in mailbox_list - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a mailbox with this name already 
exist? - if existing_mailbox: - module.exit_json(changed=False,) - - positional_args = [session_id, mailbox_name] - - if not module.check_mode: - # If this isn't a dry run, create the mailbox - result.update(webfaction.create_mailbox(*positional_args)) - - elif site_state == 'absent': - - # If the mailbox is already not there, nothing changed. - if not existing_mailbox: - module.exit_json(changed=False) - - if not module.check_mode: - # If this isn't a dry run, delete the mailbox - result.update(webfaction.delete_mailbox(session_id, mailbox_name)) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json(changed=True, result=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/webfaction_site.py b/plugins/modules/webfaction_site.py deleted file mode 100644 index 7795c45fe8..0000000000 --- a/plugins/modules/webfaction_site.py +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Create Webfaction website using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- - -deprecated: - removed_in: 9.0.0 - why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS. - alternative: no known alternative at this point - -module: webfaction_site -short_description: Add or remove a website on a Webfaction host -description: - - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. 
But at least, unlike the API, you do not need to know the IP - address. You can use a DNS name. - - If a site of the same name exists in the account but on a different host, the operation will exit. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as - your host, you may want to add C(serial=1) to the plays. - - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info. - -extends_documentation_fragment: - - community.general.attributes - -attributes: - check_mode: - support: full - diff_mode: - support: none - -options: - - name: - description: - - The name of the website - required: true - type: str - - state: - description: - - Whether the website should exist - choices: ['present', 'absent'] - default: "present" - type: str - - host: - description: - - The webfaction host on which the site should be created. - required: true - type: str - - https: - description: - - Whether or not to use HTTPS - type: bool - default: false - - site_apps: - description: - - A mapping of URLs to apps - default: [] - type: list - elements: list - - subdomains: - description: - - A list of subdomains associated with this site. 
- default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create website - community.general.webfaction_site: - name: testsite1 - state: present - host: myhost.webfaction.com - subdomains: - - 'testsite1.my_domain.org' - site_apps: - - ['testapp1', '/'] - https: false - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" -''' - -import socket - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. - host=dict(required=True), - https=dict(required=False, type='bool', default=False), - subdomains=dict(type='list', elements='str', default=[]), - site_apps=dict(type='list', elements='list', default=[]), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - site_name = module.params['name'] - site_state = module.params['state'] - site_host = module.params['host'] - site_ip = socket.gethostbyname(site_host) - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - site_list = webfaction.list_websites(session_id) - site_map = dict([(i['name'], i) for i in site_list]) - existing_site = site_map.get(site_name) - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a site with this name already exist? - if existing_site: - - # If yes, but it's on a different IP address, then fail. 
- # If we wanted to allow relocation, we could add a 'relocate=true' option - # which would get the existing IP address, delete the site there, and create it - # at the new address. A bit dangerous, perhaps, so for now we'll require manual - # deletion if it's on another host. - - if existing_site['ip'] != site_ip: - module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") - - # If it's on this host and the key parameters are the same, nothing needs to be done. - - if (existing_site['https'] == module.boolean(module.params['https'])) and \ - (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ - (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): - module.exit_json( - changed=False - ) - - positional_args = [ - session_id, site_name, site_ip, - module.boolean(module.params['https']), - module.params['subdomains'], - ] - for a in module.params['site_apps']: - positional_args.append((a[0], a[1])) - - if not module.check_mode: - # If this isn't a dry run, create or modify the site - result.update( - webfaction.create_website( - *positional_args - ) if not existing_site else webfaction.update_website( - *positional_args - ) - ) - - elif site_state == 'absent': - - # If the site's already not there, nothing changed. 
- if not existing_site: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the site - result.update( - webfaction.delete_website(session_id, site_name, site_ip) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/tests/integration/targets/module_helper/library/mdepfail.py b/tests/integration/targets/module_helper/library/mdepfail.py index 92ebbde6e8..b61c32a4da 100644 --- a/tests/integration/targets/module_helper/library/mdepfail.py +++ b/tests/integration/targets/module_helper/library/mdepfail.py @@ -30,10 +30,10 @@ EXAMPLES = "" RETURN = "" +from ansible_collections.community.general.plugins.module_utils import deps from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible.module_utils.basic import missing_required_lib -with ModuleHelper.dependency("nopackagewiththisname", missing_required_lib("nopackagewiththisname")): +with deps.declare("nopackagewiththisname"): import nopackagewiththisname # noqa: F401, pylint: disable=unused-import @@ -50,6 +50,7 @@ class MSimple(ModuleHelper): def __init_module__(self): self.vars.set('value', None) self.vars.set('abc', "abc", diff=True) + deps.validate(self.module) def __run__(self): if (0 if self.vars.a is None else self.vars.a) >= 100: diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index 0665ddc1a1..954a8afebf 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -6,9 +6,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rax_files_objects.py 
use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/read_csv.py validate-modules:invalid-documentation plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index fed147e446..01b195e9f5 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -7,9 +7,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/read_csv.py validate-modules:invalid-documentation plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt index d4c92c4d9b..667c6cee4d 100644 --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -5,9 +5,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice 
-plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index 397c6d9865..f6b058ec69 100644 --- a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -5,9 +5,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt index d75aaeac27..7479d6bafe 100644 --- a/tests/sanity/ignore-2.17.txt +++ b/tests/sanity/ignore-2.17.txt @@ -5,9 +5,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice 
plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt index d75aaeac27..7479d6bafe 100644 --- a/tests/sanity/ignore-2.18.txt +++ b/tests/sanity/ignore-2.18.txt @@ -5,9 +5,6 @@ plugins/modules/iptables_state.py validate-modules:undocumented-parameter plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0 -plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0 -plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0 plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index 3ed718d483..4eed957206 100644 --- a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -7,6 +7,7 @@ - id: install_dancer_compatibility input: name: Dancer + mode: compatibility output: changed: true 
run_command_calls: @@ -23,6 +24,7 @@ - id: install_dancer_already_installed_compatibility input: name: Dancer + mode: compatibility output: changed: false run_command_calls: @@ -34,7 +36,6 @@ - id: install_dancer input: name: Dancer - mode: new output: changed: true run_command_calls: @@ -46,6 +47,7 @@ - id: install_distribution_file_compatibility input: name: MIYAGAWA/Plack-0.99_05.tar.gz + mode: compatibility output: changed: true run_command_calls: @@ -57,7 +59,6 @@ - id: install_distribution_file input: name: MIYAGAWA/Plack-0.99_05.tar.gz - mode: new output: changed: true run_command_calls: From 62138b288a5db4f841a722edf4aaa1901d375e25 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 24 Apr 2024 08:07:29 +1200 Subject: [PATCH 051/482] Change MH to use the module_utils.vardict.VarDict (#8226) * change MH to use the module_utils.vardict.VarDict * remove VarsMixin from superclasses of MH * bump vardict deprecation to 11.0.0 + add old/new vardict selection in MH * improve backawards compatibility * improve backawards compatibility * use new vardict in some modules, make adjustments * add changelog frag * adjustment after rebase --- changelogs/fragments/8226-mh-vardict.yml | 10 +++++ plugins/module_utils/mh/mixins/vars.py | 6 +-- plugins/module_utils/mh/module_helper.py | 41 +++++++++++++++---- plugins/modules/gconftool2.py | 1 + plugins/modules/kernel_blacklist.py | 1 + plugins/modules/opkg.py | 1 + plugins/modules/pipx.py | 1 + plugins/modules/xfconf.py | 3 +- plugins/modules/xfconf_info.py | 3 +- .../targets/module_helper/library/mstate.py | 1 + 10 files changed, 56 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/8226-mh-vardict.yml diff --git a/changelogs/fragments/8226-mh-vardict.yml b/changelogs/fragments/8226-mh-vardict.yml new file mode 100644 index 0000000000..c7c62c7db0 --- /dev/null +++ b/changelogs/fragments/8226-mh-vardict.yml @@ -0,0 +1,10 @@ +deprecated_features: + - 
ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226). + - ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using the ``VardDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226). +minor_changes: + - gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py index 91f4e4a189..1615609735 100644 --- a/plugins/module_utils/mh/mixins/vars.py +++ b/plugins/module_utils/mh/mixins/vars.py @@ -14,7 +14,7 @@ class VarMeta(object): """ DEPRECATION WARNING - This class is deprecated and will be removed in community.general 10.0.0 + This class is deprecated and will be removed in community.general 11.0.0 Modules should use the VarDict from plugins/module_utils/vardict.py instead. """ @@ -70,7 +70,7 @@ class VarDict(object): """ DEPRECATION WARNING - This class is deprecated and will be removed in community.general 10.0.0 + This class is deprecated and will be removed in community.general 11.0.0 Modules should use the VarDict from plugins/module_utils/vardict.py instead. 
""" def __init__(self): @@ -139,7 +139,7 @@ class VarsMixin(object): """ DEPRECATION WARNING - This class is deprecated and will be removed in community.general 10.0.0 + This class is deprecated and will be removed in community.general 11.0.0 Modules should use the VarDict from plugins/module_utils/vardict.py instead. """ def __init__(self, module=None): diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index 3390303ce8..ca95199d9b 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project +# (c) 2020-2024, Alexei Znamensky +# Copyright (c) 2020-2024, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause @@ -10,22 +10,40 @@ __metaclass__ = type from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.vardict import VarDict as _NewVarDict # remove "as NewVarDict" in 11.0.0 # (TODO: remove AnsibleModule!) pylint: disable-next=unused-import -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.base import AnsibleModule # noqa: F401 DEPRECATED, remove in 11.0.0 +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin +# (TODO: remove mh.mixins.vars!) 
pylint: disable-next=unused-import +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _OldVarDict # noqa: F401 remove in 11.0.0 from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin -class ModuleHelper(DeprecateAttrsMixin, VarsMixin, ModuleHelperBase): +class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): facts_name = None output_params = () diff_params = () change_params = () facts_params = () + use_old_vardict = True # remove in 11.0.0 + mute_vardict_deprecation = False def __init__(self, module=None): - super(ModuleHelper, self).__init__(module) + if self.use_old_vardict: # remove first half of the if in 11.0.0 + self.vars = _OldVarDict() + super(ModuleHelper, self).__init__(module) + if not self.mute_vardict_deprecation: + self.module.deprecate( + "This class is using the old VarDict from ModuleHelper, which is deprecated. " + "Set the class variable use_old_vardict to False and make the necessary adjustments." 
+ "The old VarDict class will be removed in community.general 11.0.0", + version="11.0.0", collection_name="community.general" + ) + else: + self.vars = _NewVarDict() + super(ModuleHelper, self).__init__(module) + for name, value in self.module.params.items(): self.vars.set( name, value, @@ -35,6 +53,12 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, ModuleHelperBase): fact=name in self.facts_params, ) + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) + def update_output(self, **kwargs): self.update_vars(meta={"output": True}, **kwargs) @@ -42,7 +66,10 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, ModuleHelperBase): self.update_vars(meta={"fact": True}, **kwargs) def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + if self.use_old_vardict: + return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + + return self.vars.has_changed def has_changed(self): return self.changed or self._vars_changed() diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index a40304a166..db7c6dc883 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -123,6 +123,7 @@ class GConftool(StateModuleHelper): ], supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py index b5bd904036..2a281440a7 100644 --- a/plugins/modules/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -67,6 +67,7 @@ class Blacklist(StateModuleHelper): ), supports_check_mode=True, ) + mute_vardict_deprecation = True def __init_module__(self): self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py index 757c88c5de..2f9794ab86 100644 --- 
a/plugins/modules/opkg.py +++ b/plugins/modules/opkg.py @@ -127,6 +127,7 @@ class Opkg(StateModuleHelper): executable=dict(type="path"), ), ) + use_old_vardict = False def __init_module__(self): self.vars.set("install_c", 0, output=False, change=True) diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 705cc71a77..e82e4c32a2 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -201,6 +201,7 @@ class PipX(StateModuleHelper): ], supports_check_mode=True, ) + use_old_vardict = False def _retrieve_installed(self): def process_list(rc, out, err): diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 8ed44c675d..15943ae59d 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -187,6 +187,7 @@ class XFConfProperty(StateModuleHelper): required_together=[('value', 'value_type')], supports_check_mode=True, ) + use_old_vardict = False default_state = 'present' @@ -196,7 +197,7 @@ class XFConfProperty(StateModuleHelper): self.vars.channel) self.vars.set('previous_value', self._get()) self.vars.set('type', self.vars.value_type) - self.vars.meta('value').set(initial_value=self.vars.previous_value) + self.vars.set_meta('value', initial_value=self.vars.previous_value) def process_command_output(self, rc, out, err): if err.rstrip() == self.does_not: diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index 844ef3c111..3d56a70cb9 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -139,6 +139,7 @@ class XFConfInfo(ModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.runner = xfconf_runner(self.module, check_rc=True) @@ -176,7 +177,7 @@ class XFConfInfo(ModuleHelper): proc = self._process_list_properties with self.runner.context('list_arg channel property', output_process=proc) as ctx: - result = ctx.run(**self.vars) + result = ctx.run(**self.vars.as_dict()) if not self.vars.list_arg and 
self.vars.is_array: output = "value_array" diff --git a/tests/integration/targets/module_helper/library/mstate.py b/tests/integration/targets/module_helper/library/mstate.py index bfaab03755..b3b4ed5e69 100644 --- a/tests/integration/targets/module_helper/library/mstate.py +++ b/tests/integration/targets/module_helper/library/mstate.py @@ -49,6 +49,7 @@ class MState(StateModuleHelper): state=dict(type='str', choices=['join', 'b_x_a', 'c_x_a', 'both_x_a', 'nop'], default='join'), ), ) + use_old_vardict = False def __init_module__(self): self.vars.set('result', "abc", diff=True) From 45c2e0f8d053a85976847b2b247b04c6aacaeb9d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 27 Apr 2024 06:56:08 +1200 Subject: [PATCH 052/482] use smaller snap, add disabled to aliases (#8237) * use smaller snap, add disabled to aliases * rollback tag disabled in aliases * comment out the test_dangerous as it takes too long * comment out the test_dangerous as it takes too long * Update tests/integration/targets/snap/tasks/main.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- tests/integration/targets/snap/tasks/main.yml | 7 ++- .../targets/snap/tasks/test_channel.yml | 43 +++++++++---------- 2 files changed, 23 insertions(+), 27 deletions(-) diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index 2a683617ae..a2d8698d0f 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -13,10 +13,9 @@ block: - name: Include test ansible.builtin.include_tasks: test.yml - # TODO: Find better package to install from a channel - microk8s installation takes multiple minutes, and even removal takes one minute! 
- # - name: Include test_channel - # ansible.builtin.include_tasks: test_channel.yml - # TODO: Find bettter package to download and install from sources - cider 1.6.0 takes over 35 seconds to install + - name: Include test_channel + ansible.builtin.include_tasks: test_channel.yml + # TODO: Find better package to download and install from sources - cider 1.6.0 takes over 35 seconds to install # - name: Include test_dangerous # ansible.builtin.include_tasks: test_dangerous.yml - name: Include test_3dash diff --git a/tests/integration/targets/snap/tasks/test_channel.yml b/tests/integration/targets/snap/tasks/test_channel.yml index e9eb19c897..3537357615 100644 --- a/tests/integration/targets/snap/tasks/test_channel.yml +++ b/tests/integration/targets/snap/tasks/test_channel.yml @@ -5,47 +5,44 @@ # NOTE This is currently disabled for performance reasons! -- name: Make sure package is not installed (microk8s) +- name: Make sure package is not installed (wisdom) community.general.snap: - name: microk8s + name: wisdom state: absent # Test for https://github.com/ansible-collections/community.general/issues/1606 -- name: Install package (microk8s) +- name: Install package (wisdom) community.general.snap: - name: microk8s - classic: true + name: wisdom state: present - register: install_microk8s + register: install_wisdom -- name: Install package with channel (microk8s) +- name: Install package with channel (wisdom) community.general.snap: - name: microk8s - classic: true - channel: 1.20/stable + name: wisdom state: present - register: install_microk8s_chan + channel: latest/edge + register: install_wisdom_chan -- name: Install package with channel (microk8s) again +- name: Install package with channel (wisdom) again community.general.snap: - name: microk8s - classic: true - channel: 1.20/stable + name: wisdom state: present - register: install_microk8s_chan_again + channel: latest/edge + register: install_wisdom_chan_again -- name: Remove package (microk8s) +- name: Remove 
package (wisdom) community.general.snap: - name: microk8s + name: wisdom state: absent - register: remove_microk8s + register: remove_wisdom - assert: that: - - install_microk8s is changed - - install_microk8s_chan is changed - - install_microk8s_chan_again is not changed - - remove_microk8s is changed + - install_wisdom is changed + - install_wisdom_chan is changed + - install_wisdom_chan_again is not changed + - remove_wisdom is changed - name: Install package (shellcheck) community.general.snap: From fc2024d837581fcaa427fb105c111fa194696acd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 28 Apr 2024 17:19:54 +0200 Subject: [PATCH 053/482] CI: Arch Linux switched to Python 3.12 (#8291) Arch Linux switched to Python 3.12. --- .azure-pipelines/azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index be8f011bdf..6f5a391c4c 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -336,7 +336,7 @@ stages: - name: Debian Bookworm test: debian-bookworm/3.11 - name: ArchLinux - test: archlinux/3.11 + test: archlinux/3.12 groups: - 1 - 2 From 7051fe344970bf59b60e9bbaeb09d78105fc48f9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 30 Apr 2024 03:26:14 +1200 Subject: [PATCH 054/482] PythonRunner: a command runner for python (#8289) * PythonRunner: a command runner for python * add changelog frag * Update changelogs/fragments/8289-python-runner.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 4 + changelogs/fragments/8289-python-runner.yml | 2 + plugins/module_utils/python_runner.py | 34 +++ .../module_utils/test_python_runner.py | 223 ++++++++++++++++++ 4 files changed, 263 insertions(+) create mode 100644 changelogs/fragments/8289-python-runner.yml create mode 100644 plugins/module_utils/python_runner.py create mode 
100644 tests/unit/plugins/module_utils/test_python_runner.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 3d09cf4c5b..41a4824d26 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -294,6 +294,8 @@ files: labels: module_utils $module_utils/btrfs.py: maintainers: gnfzdz + $module_utils/cmd_runner.py: + maintainers: russoz $module_utils/deps.py: maintainers: russoz $module_utils/gconftool2.py: @@ -339,6 +341,8 @@ files: $module_utils/pipx.py: labels: pipx maintainers: russoz + $module_utils/python_runner.py: + maintainers: russoz $module_utils/puppet.py: labels: puppet maintainers: russoz diff --git a/changelogs/fragments/8289-python-runner.yml b/changelogs/fragments/8289-python-runner.yml new file mode 100644 index 0000000000..97a45fd8f3 --- /dev/null +++ b/changelogs/fragments/8289-python-runner.yml @@ -0,0 +1,2 @@ +minor_changes: + - PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289). 
diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py new file mode 100644 index 0000000000..f678f247b4 --- /dev/null +++ b/plugins/module_utils/python_runner.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, _ensure_list + + +class PythonRunner(CmdRunner): + def __init__(self, module, command, arg_formats=None, default_args_order=(), + check_rc=False, force_lang="C", path_prefix=None, environ_update=None, + python="python", venv=None): + self.python = python + self.venv = venv + self.has_venv = venv is not None + + if (os.path.isabs(python) or '/' in python): + self.python = python + elif self.has_venv: + path_prefix = os.path.join(venv, "bin") + if environ_update is None: + environ_update = {} + environ_update["PATH"] = "%s:%s" % (path_prefix, os.environ["PATH"]) + environ_update["VIRTUAL_ENV"] = venv + + python_cmd = [self.python] + _ensure_list(command) + + super(PythonRunner, self).__init__(module, python_cmd, arg_formats, default_args_order, + check_rc, force_lang, path_prefix, environ_update) diff --git a/tests/unit/plugins/module_utils/test_python_runner.py b/tests/unit/plugins/module_utils/test_python_runner.py new file mode 100644 index 0000000000..015065bdd4 --- /dev/null +++ b/tests/unit/plugins/module_utils/test_python_runner.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, 
print_function) +__metaclass__ = type + +import os + +import pytest + +from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, PropertyMock +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner + + +TC_RUNNER = dict( + # SAMPLE: This shows all possible elements of a test case. It does not actually run. + # + # testcase_name=( + # # input + # dict( + # args_bundle = dict( + # param1=dict( + # type="int", + # value=11, + # fmt_func=cmd_runner_fmt.as_opt_eq_val, + # fmt_arg="--answer", + # ), + # param2=dict( + # fmt_func=cmd_runner_fmt.as_bool, + # fmt_arg="--bb-here", + # ) + # ), + # runner_init_args = dict( + # command="testing", + # default_args_order=(), + # check_rc=False, + # force_lang="C", + # path_prefix=None, + # environ_update=None, + # ), + # runner_ctx_args = dict( + # args_order=['aa', 'bb'], + # output_process=None, + # ignore_value_none=True, + # ), + # ), + # # command execution + # dict( + # runner_ctx_run_args = dict(bb=True), + # rc = 0, + # out = "", + # err = "", + # ), + # # expected + # dict( + # results=(), + # run_info=dict( + # cmd=['/mock/bin/testing', '--answer=11', '--bb-here'], + # environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, + # ), + # exc=None, + # ), + # ), + # + aa_bb=( + dict( + args_bundle=dict( + aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), + bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), + ), + runner_init_args=dict(command="testing"), + runner_ctx_args=dict(args_order=['aa', 'bb']), + ), + dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), + dict( + run_info=dict( + cmd=['/mock/bin/python', 'testing', '--answer=11', '--bb-here'], + environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, + args_order=('aa', 'bb'), + ), + ), + ), + aa_bb_py3=( + dict( + args_bundle=dict( + aa=dict(type="int", 
value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), + bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), + ), + runner_init_args=dict(command="toasting", python="python3"), + runner_ctx_args=dict(args_order=['aa', 'bb']), + ), + dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), + dict( + run_info=dict( + cmd=['/mock/bin/python3', 'toasting', '--answer=11', '--bb-here'], + environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, + args_order=('aa', 'bb'), + ), + ), + ), + aa_bb_abspath=( + dict( + args_bundle=dict( + aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), + bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), + ), + runner_init_args=dict(command="toasting", python="/crazy/local/bin/python3"), + runner_ctx_args=dict(args_order=['aa', 'bb']), + ), + dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), + dict( + run_info=dict( + cmd=['/crazy/local/bin/python3', 'toasting', '--answer=11', '--bb-here'], + environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, + args_order=('aa', 'bb'), + ), + ), + ), + aa_bb_venv=( + dict( + args_bundle=dict( + aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), + bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), + ), + runner_init_args=dict(command="toasting", venv="/venv"), + runner_ctx_args=dict(args_order=['aa', 'bb']), + ), + dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), + dict( + run_info=dict( + cmd=['/venv/bin/python', 'toasting', '--answer=11', '--bb-here'], + environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C', 'VIRTUAL_ENV': '/venv', 'PATH': '/venv/bin'}, + args_order=('aa', 'bb'), + ), + ), + ), +) +TC_RUNNER_IDS = sorted(TC_RUNNER.keys()) + + +@pytest.mark.parametrize('runner_input, cmd_execution, expected', + (TC_RUNNER[tc] for tc in TC_RUNNER_IDS), + ids=TC_RUNNER_IDS) +def test_runner_context(runner_input, cmd_execution, expected): + arg_spec = {} + params = {} 
+ arg_formats = {} + for k, v in runner_input['args_bundle'].items(): + try: + arg_spec[k] = {'type': v['type']} + except KeyError: + pass + try: + params[k] = v['value'] + except KeyError: + pass + try: + arg_formats[k] = v['fmt_func'](v['fmt_arg']) + except KeyError: + pass + + orig_results = tuple(cmd_execution[x] for x in ('rc', 'out', 'err')) + + print("arg_spec={0}\nparams={1}\narg_formats={2}\n".format( + arg_spec, + params, + arg_formats, + )) + + module = MagicMock() + type(module).argument_spec = PropertyMock(return_value=arg_spec) + type(module).params = PropertyMock(return_value=params) + module.get_bin_path.return_value = os.path.join( + runner_input["runner_init_args"].get("venv", "/mock"), + "bin", + runner_input["runner_init_args"].get("python", "python") + ) + module.run_command.return_value = orig_results + + runner = PythonRunner( + module=module, + arg_formats=arg_formats, + **runner_input['runner_init_args'] + ) + + def _extract_path(run_info): + path = run_info.get("environ_update", {}).get("PATH") + if path is not None: + run_info["environ_update"] = dict((k, v) + for k, v in run_info["environ_update"].items() + if k != "PATH") + return run_info, path + + def _assert_run_info_env_path(actual, expected): + actual2 = set(actual.split(":")) + assert expected in actual2, "Missing expected path {0} in output PATH: {1}".format(expected, actual) + + def _assert_run_info(actual, expected): + reduced = dict((k, actual[k]) for k in expected.keys()) + reduced, act_path = _extract_path(reduced) + expected, exp_path = _extract_path(expected) + if exp_path is not None: + _assert_run_info_env_path(act_path, exp_path) + assert reduced == expected, "{0}".format(reduced) + + def _assert_run(expected, ctx, results): + _assert_run_info(ctx.run_info, expected['run_info']) + assert results == expected.get('results', orig_results) + + exc = expected.get("exc") + if exc: + with pytest.raises(exc): + with runner.context(**runner_input['runner_ctx_args']) as ctx: + 
results = ctx.run(**cmd_execution['runner_ctx_run_args']) + _assert_run(expected, ctx, results) + + else: + with runner.context(**runner_input['runner_ctx_args']) as ctx: + results = ctx.run(**cmd_execution['runner_ctx_run_args']) + _assert_run(expected, ctx, results) From 85f9d895107493a4cca27872868faae7cc0d6918 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 30 Apr 2024 03:26:31 +1200 Subject: [PATCH 055/482] CmdRunner format as_list - check for min_len and max_len (#8288) * CmdRunner format as_list - check for min_len and max_len * Change default min len, add chglog frag --- .../8288-cmdrunner-fmt-list-len-limits.yml | 2 + plugins/module_utils/cmd_runner.py | 11 ++- .../plugins/module_utils/test_cmd_runner.py | 82 +++++++++++-------- 3 files changed, 57 insertions(+), 38 deletions(-) create mode 100644 changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml diff --git a/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml b/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml new file mode 100644 index 0000000000..94de04740b --- /dev/null +++ b/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd_runner module_utils - add validation for minimum and maximum length in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 8649871207..2bf2b32e8c 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -129,8 +129,15 @@ class _Format(object): return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none) @staticmethod - def as_list(ignore_none=None): - return _ArgFormat(_ensure_list, ignore_none=ignore_none) + def as_list(ignore_none=None, min_len=0, max_len=None): + def func(value): + value = _ensure_list(value) + if len(value) < min_len: + raise ValueError("Parameter must have at least {0} element(s)".format(min_len)) + if max_len is not None and len(value) > max_len: + raise ValueError("Parameter must have at most {0} element(s)".format(max_len)) + return value + return _ArgFormat(func, ignore_none=ignore_none) @staticmethod def as_fixed(args): diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index 86576e8ce4..6816afb34c 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -7,6 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from sys import version_info +from functools import partial import pytest @@ -15,55 +16,64 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor TC_FORMATS = dict( - simple_boolean__true=(cmd_runner_fmt.as_bool, ("--superflag",), True, ["--superflag"]), - simple_boolean__false=(cmd_runner_fmt.as_bool, ("--superflag",), False, []), - simple_boolean__none=(cmd_runner_fmt.as_bool, ("--superflag",), None, []), - simple_boolean_both__true=(cmd_runner_fmt.as_bool, ("--superflag", "--falseflag"), True, ["--superflag"]), - simple_boolean_both__false=(cmd_runner_fmt.as_bool, ("--superflag", "--falseflag"), False, ["--falseflag"]), - simple_boolean_both__none=(cmd_runner_fmt.as_bool, ("--superflag", 
"--falseflag"), None, ["--falseflag"]), - simple_boolean_both__none_ig=(cmd_runner_fmt.as_bool, ("--superflag", "--falseflag", True), None, []), - simple_boolean_not__true=(cmd_runner_fmt.as_bool_not, ("--superflag",), True, []), - simple_boolean_not__false=(cmd_runner_fmt.as_bool_not, ("--superflag",), False, ["--superflag"]), - simple_boolean_not__none=(cmd_runner_fmt.as_bool_not, ("--superflag",), None, ["--superflag"]), - simple_optval__str=(cmd_runner_fmt.as_optval, ("-t",), "potatoes", ["-tpotatoes"]), - simple_optval__int=(cmd_runner_fmt.as_optval, ("-t",), 42, ["-t42"]), - simple_opt_val__str=(cmd_runner_fmt.as_opt_val, ("-t",), "potatoes", ["-t", "potatoes"]), - simple_opt_val__int=(cmd_runner_fmt.as_opt_val, ("-t",), 42, ["-t", "42"]), - simple_opt_eq_val__str=(cmd_runner_fmt.as_opt_eq_val, ("--food",), "potatoes", ["--food=potatoes"]), - simple_opt_eq_val__int=(cmd_runner_fmt.as_opt_eq_val, ("--answer",), 42, ["--answer=42"]), - simple_list_potato=(cmd_runner_fmt.as_list, (), "literal_potato", ["literal_potato"]), - simple_list_42=(cmd_runner_fmt.as_list, (), 42, ["42"]), - simple_map=(cmd_runner_fmt.as_map, ({'a': 1, 'b': 2, 'c': 3},), 'b', ["2"]), - simple_default_type__list=(cmd_runner_fmt.as_default_type, ("list",), [1, 2, 3, 5, 8], ["--1", "--2", "--3", "--5", "--8"]), - simple_default_type__bool_true=(cmd_runner_fmt.as_default_type, ("bool", "what"), True, ["--what"]), - simple_default_type__bool_false=(cmd_runner_fmt.as_default_type, ("bool", "what"), False, []), - simple_default_type__potato=(cmd_runner_fmt.as_default_type, ("any-other-type", "potato"), "42", ["--potato", "42"]), - simple_fixed_true=(cmd_runner_fmt.as_fixed, [("--always-here", "--forever")], True, ["--always-here", "--forever"]), - simple_fixed_false=(cmd_runner_fmt.as_fixed, [("--always-here", "--forever")], False, ["--always-here", "--forever"]), - simple_fixed_none=(cmd_runner_fmt.as_fixed, [("--always-here", "--forever")], None, ["--always-here", "--forever"]), - 
simple_fixed_str=(cmd_runner_fmt.as_fixed, [("--always-here", "--forever")], "something", ["--always-here", "--forever"]), + simple_boolean__true=(partial(cmd_runner_fmt.as_bool, "--superflag"), True, ["--superflag"], None), + simple_boolean__false=(partial(cmd_runner_fmt.as_bool, "--superflag"), False, [], None), + simple_boolean__none=(partial(cmd_runner_fmt.as_bool, "--superflag"), None, [], None), + simple_boolean_both__true=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), True, ["--superflag"], None), + simple_boolean_both__false=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), False, ["--falseflag"], None), + simple_boolean_both__none=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), None, ["--falseflag"], None), + simple_boolean_both__none_ig=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag", True), None, [], None), + simple_boolean_not__true=(partial(cmd_runner_fmt.as_bool_not, "--superflag"), True, [], None), + simple_boolean_not__false=(partial(cmd_runner_fmt.as_bool_not, "--superflag"), False, ["--superflag"], None), + simple_boolean_not__none=(partial(cmd_runner_fmt.as_bool_not, "--superflag"), None, ["--superflag"], None), + simple_optval__str=(partial(cmd_runner_fmt.as_optval, "-t"), "potatoes", ["-tpotatoes"], None), + simple_optval__int=(partial(cmd_runner_fmt.as_optval, "-t"), 42, ["-t42"], None), + simple_opt_val__str=(partial(cmd_runner_fmt.as_opt_val, "-t"), "potatoes", ["-t", "potatoes"], None), + simple_opt_val__int=(partial(cmd_runner_fmt.as_opt_val, "-t"), 42, ["-t", "42"], None), + simple_opt_eq_val__str=(partial(cmd_runner_fmt.as_opt_eq_val, "--food"), "potatoes", ["--food=potatoes"], None), + simple_opt_eq_val__int=(partial(cmd_runner_fmt.as_opt_eq_val, "--answer"), 42, ["--answer=42"], None), + simple_list_potato=(cmd_runner_fmt.as_list, "literal_potato", ["literal_potato"], None), + simple_list_42=(cmd_runner_fmt.as_list, 42, ["42"], None), + 
simple_list_min_len_ok=(partial(cmd_runner_fmt.as_list, min_len=1), 42, ["42"], None), + simple_list_min_len_fail=(partial(cmd_runner_fmt.as_list, min_len=10), 42, None, ValueError), + simple_list_max_len_ok=(partial(cmd_runner_fmt.as_list, max_len=1), 42, ["42"], None), + simple_list_max_len_fail=(partial(cmd_runner_fmt.as_list, max_len=2), [42, 42, 42], None, ValueError), + simple_map=(partial(cmd_runner_fmt.as_map, {'a': 1, 'b': 2, 'c': 3}), 'b', ["2"], None), + simple_default_type__list=(partial(cmd_runner_fmt.as_default_type, "list"), [1, 2, 3, 5, 8], ["--1", "--2", "--3", "--5", "--8"], None), + simple_default_type__bool_true=(partial(cmd_runner_fmt.as_default_type, "bool", "what"), True, ["--what"], None), + simple_default_type__bool_false=(partial(cmd_runner_fmt.as_default_type, "bool", "what"), False, [], None), + simple_default_type__potato=(partial(cmd_runner_fmt.as_default_type, "any-other-type", "potato"), "42", ["--potato", "42"], None), + simple_fixed_true=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), True, ["--always-here", "--forever"], None), + simple_fixed_false=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), False, ["--always-here", "--forever"], None), + simple_fixed_none=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), None, ["--always-here", "--forever"], None), + simple_fixed_str=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), "something", ["--always-here", "--forever"], None), ) if tuple(version_info) >= (3, 1): from collections import OrderedDict # needs OrderedDict to provide a consistent key order TC_FORMATS["simple_default_type__dict"] = ( # type: ignore - cmd_runner_fmt.as_default_type, - ("dict",), + partial(cmd_runner_fmt.as_default_type, "dict"), OrderedDict((('a', 1), ('b', 2))), - ["--a=1", "--b=2"] + ["--a=1", "--b=2"], + None ) TC_FORMATS_IDS = sorted(TC_FORMATS.keys()) -@pytest.mark.parametrize('func, fmt_opt, value, expected', 
+@pytest.mark.parametrize('func, value, expected, exception', (TC_FORMATS[tc] for tc in TC_FORMATS_IDS), ids=TC_FORMATS_IDS) -def test_arg_format(func, fmt_opt, value, expected): - fmt_func = func(*fmt_opt) - actual = fmt_func(value, ctx_ignore_none=True) - print("formatted string = {0}".format(actual)) - assert actual == expected, "actual = {0}".format(actual) +def test_arg_format(func, value, expected, exception): + fmt_func = func() + try: + actual = fmt_func(value, ctx_ignore_none=True) + print("formatted string = {0}".format(actual)) + assert actual == expected, "actual = {0}".format(actual) + except Exception as e: + if exception is None: + raise + assert isinstance(e, exception) TC_RUNNER = dict( From b48293ca3129a7f42d17525415443be2dc1a280c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 30 Apr 2024 03:27:06 +1200 Subject: [PATCH 056/482] MH: deprecate features (#8280) * deprecate features * add changelog frag --- changelogs/fragments/8280-mh-deprecations.yml | 8 ++++++++ plugins/module_utils/mh/mixins/deps.py | 6 ++++++ plugins/module_utils/module_helper.py | 10 +++++----- 3 files changed, 19 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/8280-mh-deprecations.yml diff --git a/changelogs/fragments/8280-mh-deprecations.yml b/changelogs/fragments/8280-mh-deprecations.yml new file mode 100644 index 0000000000..ae70f96b1e --- /dev/null +++ b/changelogs/fragments/8280-mh-deprecations.yml @@ -0,0 +1,8 @@ +deprecated_features: + - MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` (https://github.com/ansible-collections/community.general/pull/8280). 
+ - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict,`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` (https://github.com/ansible-collections/community.general/pull/8280). diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py index 666081ccd1..dd879ff4b2 100644 --- a/plugins/module_utils/mh/mixins/deps.py +++ b/plugins/module_utils/mh/mixins/deps.py @@ -9,6 +9,12 @@ __metaclass__ = type class DependencyCtxMgr(object): + """ + DEPRECATION WARNING + + This class is deprecated and will be removed in community.general 11.0.0 + Modules should use plugins/module_utils/deps.py instead. 
+ """ def __init__(self, name, msg=None): self.name = name self.msg = msg diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 4754ec9ad0..366699329a 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -9,14 +9,14 @@ __metaclass__ = type # pylint: disable=unused-import - from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, AnsibleModule + ModuleHelper, StateModuleHelper, + AnsibleModule # remove in 11.0.0 ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 remove in 11.0.0 +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 remove in 11.0.0 from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.deco import ( cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401 remove in 11.0.0 From 70adba89919bf5ac520b3f381a0d81e058e9f4fb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 29 Apr 2024 22:57:08 +0200 Subject: [PATCH 057/482] Convert some run_command() string args to lists (#8264) * Convert some run_command() string args to lists. * Change run_command with pipe and shell to Python code. * Add changelog. * Simplify syntax. 
Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/8264-run_command.yml | 14 ++++ plugins/modules/aix_lvol.py | 19 +++-- plugins/modules/apt_rpm.py | 18 ++--- plugins/modules/btrfs_subvolume.py | 9 +-- plugins/modules/installp.py | 13 ++-- plugins/modules/lvg.py | 22 +++--- plugins/modules/lvol.py | 84 +++++++++++------------ plugins/modules/macports.py | 12 ++-- plugins/modules/parted.py | 9 +-- plugins/modules/pkgin.py | 25 +++---- plugins/modules/portinstall.py | 26 +++---- plugins/modules/slackpkg.py | 18 +++-- plugins/modules/svr4pkg.py | 2 +- plugins/modules/swdepot.py | 17 +++-- 14 files changed, 144 insertions(+), 144 deletions(-) create mode 100644 changelogs/fragments/8264-run_command.yml diff --git a/changelogs/fragments/8264-run_command.yml b/changelogs/fragments/8264-run_command.yml new file mode 100644 index 0000000000..dd66cd6123 --- /dev/null +++ b/changelogs/fragments/8264-run_command.yml @@ -0,0 +1,14 @@ +minor_changes: + - "aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "installp - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." 
+ - "lvg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "macports - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "parted - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "pkgin - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "portinstall - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "slackpkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." + - "swdepot - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." 
diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py index 1e7b425687..7d0fb1ee09 100644 --- a/plugins/modules/aix_lvol.py +++ b/plugins/modules/aix_lvol.py @@ -240,8 +240,6 @@ def main(): state = module.params['state'] pvs = module.params['pvs'] - pv_list = ' '.join(pvs) - if policy == 'maximum': lv_policy = 'x' else: @@ -249,16 +247,16 @@ def main(): # Add echo command when running in check-mode if module.check_mode: - test_opt = 'echo ' + test_opt = [module.get_bin_path("echo", required=True)] else: - test_opt = '' + test_opt = [] # check if system commands are available lsvg_cmd = module.get_bin_path("lsvg", required=True) lslv_cmd = module.get_bin_path("lslv", required=True) # Get information on volume group requested - rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) + rc, vg_info, err = module.run_command([lsvg_cmd, vg]) if rc != 0: if state == 'absent': @@ -273,8 +271,7 @@ def main(): lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) # Get information on logical volume requested - rc, lv_info, err = module.run_command( - "%s %s" % (lslv_cmd, lv)) + rc, lv_info, err = module.run_command([lslv_cmd, lv]) if rc != 0: if state == 'absent': @@ -296,7 +293,7 @@ def main(): # create LV mklv_cmd = module.get_bin_path("mklv", required=True) - cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) + cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, "%sM" % (lv_size, )] + pvs rc, out, err = module.run_command(cmd) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s created." 
% lv) @@ -306,7 +303,7 @@ def main(): if state == 'absent': # remove LV rmlv_cmd = module.get_bin_path("rmlv", required=True) - rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']]) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) else: @@ -315,7 +312,7 @@ def main(): if this_lv['policy'] != policy: # change lv allocation policy chlv_cmd = module.get_bin_path("chlv", required=True) - rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']]) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy)) else: @@ -331,7 +328,7 @@ def main(): # resize LV based on absolute values if int(lv_size) > this_lv['size']: extendlv_cmd = module.get_bin_path("extendlv", required=True) - cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size']) + cmd = test_opt + [extendlv_cmd, lv, "%sM" % (lv_size - this_lv['size'], )] rc, out, err = module.run_command(cmd) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." 
% (lv, lv_size)) diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index 03b87e78f0..07da307633 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -170,7 +170,7 @@ def local_rpm_package_name(path): def query_package(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name)) + rc, out, err = module.run_command([RPM_PATH, "-q", name]) if rc == 0: return True else: @@ -203,7 +203,7 @@ def query_package_provides(module, name, allow_upgrade=False): name = local_rpm_package_name(name) - rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name)) + rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name]) if rc == 0: if not allow_upgrade: return True @@ -253,7 +253,7 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"}) + rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"}) if rc != 0: module.fail_json(msg="failed to remove %s: %s" % (package, err)) @@ -271,14 +271,14 @@ def install_packages(module, pkgspec, allow_upgrade=False): if pkgspec is None: return (False, "Empty package list") - packages = "" + packages = [] for package in pkgspec: if not query_package_provides(module, package, allow_upgrade=allow_upgrade): - packages += "'%s' " % package + packages.append(package) - if len(packages) != 0: - - rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"}) + if packages: + command = [APT_PATH, "-y", "install"] + packages + rc, out, err = module.run_command(command, environ_update={"LANG": "C"}) installed = True for package in pkgspec: @@ -287,7 +287,7 @@ def install_packages(module, pkgspec, allow_upgrade=False): # apt-rpm always have 0 for exit code if --force is 
used if rc or not installed: - module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) + module.fail_json(msg="'%s' failed: %s" % (" ".join(command), err)) else: return (True, "%s present(s)" % packages) else: diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py index 864bb65a66..35327bfe02 100644 --- a/plugins/modules/btrfs_subvolume.py +++ b/plugins/modules/btrfs_subvolume.py @@ -572,10 +572,7 @@ class BtrfsSubvolumeModule(object): self.__temporary_mounts[cache_key] = mountpoint mount = self.module.get_bin_path("mount", required=True) - command = "%s -o noatime,subvolid=%d %s %s " % (mount, - subvolid, - device, - mountpoint) + command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint] result = self.module.run_command(command, check_rc=True) return mountpoint @@ -586,10 +583,10 @@ class BtrfsSubvolumeModule(object): def __cleanup_mount(self, mountpoint): umount = self.module.get_bin_path("umount", required=True) - result = self.module.run_command("%s %s" % (umount, mountpoint)) + result = self.module.run_command([umount, mountpoint]) if result[0] == 0: rmdir = self.module.get_bin_path("rmdir", required=True) - self.module.run_command("%s %s" % (rmdir, mountpoint)) + self.module.run_command([rmdir, mountpoint]) # Format and return results def get_results(self): diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py index 4b5a6949c6..1531d2cad2 100644 --- a/plugins/modules/installp.py +++ b/plugins/modules/installp.py @@ -106,7 +106,7 @@ def _check_new_pkg(module, package, repository_path): if os.path.isdir(repository_path): installp_cmd = module.get_bin_path('installp', True) - rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path)) + rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) @@ -142,7 +142,7 @@ def 
_check_installed_pkg(module, package, repository_path): """ lslpp_cmd = module.get_bin_path('lslpp', True) - rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package)) + rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", "%s*" % (package, )]) if rc == 1: package_state = ' '.join(err.split()[-2:]) @@ -173,7 +173,7 @@ def remove(module, installp_cmd, packages): if pkg_check: if not module.check_mode: - rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package)) + rc, remove_out, err = module.run_command([installp_cmd, "-u", package]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) remove_count += 1 @@ -202,8 +202,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): already_installed_pkgs = {} accept_license_param = { - True: '-Y', - False: '', + True: ['-Y'], + False: [], } # Validate if package exists on repository path. @@ -230,7 +230,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): else: if not module.check_mode: - rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package)) + rc, out, err = module.run_command( + [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package]) if rc != 0: module.fail_json(msg="Failed to run installp", rc=rc, err=err) installed_pkgs.append(package) diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py index 8a6384369a..7ff7e3a2e7 100644 --- a/plugins/modules/lvg.py +++ b/plugins/modules/lvg.py @@ -179,7 +179,7 @@ def parse_vgs(data): def find_mapper_device_name(module, dm_device): dmsetup_cmd = module.get_bin_path('dmsetup', True) mapper_prefix = '/dev/mapper/' - rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + rc, dm_name, err = module.run_command([dmsetup_cmd, "info", "-C", "--noheadings", "-o", "name", 
dm_device]) if rc != 0: module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) mapper_device = mapper_prefix + dm_name.rstrip() @@ -204,7 +204,7 @@ def find_vg(module, vg): if not vg: return None vgs_cmd = module.get_bin_path('vgs', True) - dummy, current_vgs, dummy = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd, check_rc=True) + dummy, current_vgs, dummy = module.run_command([vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True) vgs = parse_vgs(current_vgs) @@ -431,10 +431,10 @@ def main(): for x in itertools.chain(dev_list, module.params['pvs']) ) pvs_filter_vg_name = 'vg_name = {0}'.format(vg) - pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) + pvs_filter = ["--select", "{0} || {1}".format(pvs_filter_pv_name, pvs_filter_vg_name)] else: - pvs_filter = '' - rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) + pvs_filter = [] + rc, current_pvs, err = module.run_command([pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter) if rc != 0: module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) @@ -473,7 +473,7 @@ def main(): if this_vg['lv_count'] == 0 or force: # remove VG vgremove_cmd = module.get_bin_path('vgremove', True) - rc, dummy, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + rc, dummy, err = module.run_command([vgremove_cmd, "--force", vg]) if rc == 0: module.exit_json(changed=True) else: @@ -509,7 +509,6 @@ def main(): changed = True else: if devs_to_add: - devs_to_add_string = ' '.join(devs_to_add) # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in devs_to_add: @@ -520,21 +519,20 @@ def main(): module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) # add PV to our VG vgextend_cmd = module.get_bin_path('vgextend', 
True) - rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) + rc, dummy, err = module.run_command([vgextend_cmd, vg] + devs_to_add) if rc == 0: changed = True else: - module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) + module.fail_json(msg="Unable to extend %s by %s." % (vg, ' '.join(devs_to_add)), rc=rc, err=err) # remove some PV from our VG if devs_to_remove: - devs_to_remove_string = ' '.join(devs_to_remove) vgreduce_cmd = module.get_bin_path('vgreduce', True) - rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + rc, dummy, err = module.run_command([vgreduce_cmd, "--force", vg] + devs_to_remove) if rc == 0: changed = True else: - module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) + module.fail_json(msg="Unable to reduce %s by %s." % (vg, ' '.join(devs_to_remove)), rc=rc, err=err) module.exit_json(changed=changed) diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py index a2a870260a..3a2f5c7cdd 100644 --- a/plugins/modules/lvol.py +++ b/plugins/modules/lvol.py @@ -236,6 +236,7 @@ EXAMPLES = ''' ''' import re +import shlex from ansible.module_utils.basic import AnsibleModule @@ -281,7 +282,7 @@ def parse_vgs(data): def get_lvm_version(module): ver_cmd = module.get_bin_path("lvm", required=True) - rc, out, err = module.run_command("%s version" % (ver_cmd)) + rc, out, err = module.run_command([ver_cmd, "version"]) if rc != 0: return None m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) @@ -320,14 +321,14 @@ def main(): module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option if version_found >= version_yesopt: - yesopt = "--yes" + yesopt = ["--yes"] else: - yesopt = "" + yesopt = [] vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] - opts = 
module.params['opts'] + opts = shlex.split(module.params['opts'] or '') state = module.params['state'] force = module.boolean(module.params['force']) shrink = module.boolean(module.params['shrink']) @@ -338,21 +339,13 @@ def main(): size_unit = 'm' size_operator = None snapshot = module.params['snapshot'] - pvs = module.params['pvs'] - - if pvs is None: - pvs = "" - else: - pvs = " ".join(pvs) - - if opts is None: - opts = "" + pvs = module.params['pvs'] or [] # Add --test option when running in check-mode if module.check_mode: - test_opt = ' --test' + test_opt = ['--test'] else: - test_opt = '' + test_opt = [] if size: # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing @@ -400,7 +393,7 @@ def main(): # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( - "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg)) + [vgs_cmd, "--noheadings", "--nosuffix", "-o", "vg_name,size,free,vg_extent_size", "--units", unit.lower(), "--separator", ";", vg]) if rc != 0: if state == 'absent': @@ -414,7 +407,7 @@ def main(): # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg)) + [lvs_cmd, "-a", "--noheadings", "--nosuffix", "-o", "lv_name,size,lv_attr", "--units", unit.lower(), "--separator", ";", vg]) if rc != 0: if state == 'absent': @@ -474,20 +467,23 @@ def main(): # create LV lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + cmd = [lvcreate_cmd] + test_opt + yesopt if snapshot is not None: if size: - cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) - else: - cmd = "%s %s %s -s -n %s %s %s/%s" % 
(lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv) - elif thinpool and lv: - if size_opt == 'l': - module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") - size_opt = 'V' - cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) - elif thinpool and not lv: - cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["-s", "-n", snapshot] + opts + ["%s/%s" % (vg, lv)] + elif thinpool: + if lv: + if size_opt == 'l': + module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") + size_opt = 'V' + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + ["-T", "%s/%s" % (vg, thinpool)] else: - cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + [vg] + pvs rc, dummy, err = module.run_command(cmd) if rc == 0: changed = True @@ -499,7 +495,7 @@ def main(): if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." 
% (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) - rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvremove_cmd] + test_opt + ["--force", "%s/%s" % (vg, this_lv['name'])]) if rc == 0: module.exit_json(changed=True) else: @@ -527,7 +523,7 @@ def main(): if this_lv['size'] < size_requested: if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): - tool = module.get_bin_path("lvextend", required=True) + tool = [module.get_bin_path("lvextend", required=True)] else: module.fail_json( msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % @@ -539,16 +535,17 @@ def main(): elif not force: module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name'])) else: - tool = module.get_bin_path("lvreduce", required=True) - tool = '%s %s' % (tool, '--force') + tool = [module.get_bin_path("lvreduce", required=True), '--force'] if tool: if resizefs: - tool = '%s %s' % (tool, '--resizefs') + tool += ['--resizefs'] + cmd = tool + test_opt if size_operator: - cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] else: - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) @@ -566,23 +563,24 @@ def main(): # resize LV based on absolute values tool = None if float(size) > this_lv['size'] or size_operator == '+': - tool = module.get_bin_path("lvextend", required=True) + tool = 
[module.get_bin_path("lvextend", required=True)] elif shrink and float(size) < this_lv['size'] or size_operator == '-': if float(size) == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=true." % (this_lv['name'])) else: - tool = module.get_bin_path("lvreduce", required=True) - tool = '%s %s' % (tool, '--force') + tool = [module.get_bin_path("lvreduce", required=True), '--force'] if tool: if resizefs: - tool = '%s %s' % (tool, '--resizefs') + tool += ['--resizefs'] + cmd = tool + test_opt if size_operator: - cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] else: - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) @@ -598,14 +596,14 @@ def main(): if this_lv is not None: if active: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvchange_cmd, "-ay", "%s/%s" % (vg, this_lv['name'])]) if rc == 0: module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvchange_cmd, "-an", "%s/%s" % (vg, 
this_lv['name'])]) if rc == 0: module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py index e81fb9142c..cd620687d7 100644 --- a/plugins/modules/macports.py +++ b/plugins/modules/macports.py @@ -111,7 +111,7 @@ from ansible.module_utils.basic import AnsibleModule def selfupdate(module, port_path): """ Update Macports and the ports tree. """ - rc, out, err = module.run_command("%s -v selfupdate" % port_path) + rc, out, err = module.run_command([port_path, "-v", "selfupdate"]) if rc == 0: updated = any( @@ -135,7 +135,7 @@ def selfupdate(module, port_path): def upgrade(module, port_path): """ Upgrade outdated ports. """ - rc, out, err = module.run_command("%s upgrade outdated" % port_path) + rc, out, err = module.run_command([port_path, "upgrade", "outdated"]) # rc is 1 when nothing to upgrade so check stdout first. if out.strip() == "Nothing to upgrade.": @@ -182,7 +182,7 @@ def remove_ports(module, port_path, ports, stdout, stderr): if not query_port(module, port_path, port): continue - rc, out, err = module.run_command("%s uninstall %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "uninstall", port]) stdout += out stderr += err if query_port(module, port_path, port): @@ -206,7 +206,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr): if query_port(module, port_path, port): continue - rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant)) + rc, out, err = module.run_command([port_path, "install", port, variant]) stdout += out stderr += err if not query_port(module, port_path, port): @@ -232,7 +232,7 @@ def activate_ports(module, port_path, ports, stdout, stderr): if query_port(module, port_path, port, state="active"): continue - rc, out, err = module.run_command("%s activate %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "activate", port]) 
stdout += out stderr += err @@ -259,7 +259,7 @@ def deactivate_ports(module, port_path, ports, stdout, stderr): if not query_port(module, port_path, port, state="active"): continue - rc, out, err = module.run_command("%s deactivate %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "deactivate", port]) stdout += out stderr += err if query_port(module, port_path, port, state="active"): diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py index 382e47a475..b3616a8ecd 100644 --- a/plugins/modules/parted.py +++ b/plugins/modules/parted.py @@ -480,12 +480,12 @@ def get_device_info(device, unit): if label_needed: return get_unlabeled_device_info(device, unit) - command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit) + command = [parted_exec, "-s", "-m", device, "--", "unit", unit, "print"] rc, out, err = module.run_command(command) if rc != 0 and 'unrecognised disk label' not in err: module.fail_json(msg=( "Error while getting device information with parted " - "script: '%s'" % command), + "script: '%s'" % " ".join(command)), rc=rc, out=out, err=err ) @@ -506,7 +506,7 @@ def check_parted_label(device): return False # Older parted versions return a message in the stdout and RC > 0. - rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device)) + rc, out, err = module.run_command([parted_exec, "-s", "-m", device, "print"]) if rc != 0 and 'unrecognised disk label' in out.lower(): return True @@ -546,7 +546,7 @@ def parted_version(): """ global module, parted_exec # pylint: disable=global-variable-not-assigned - rc, out, err = module.run_command("%s --version" % parted_exec) + rc, out, err = module.run_command([parted_exec, "--version"]) if rc != 0: module.fail_json( msg="Failed to get parted version.", rc=rc, out=out, err=err @@ -580,6 +580,7 @@ def parted(script, device, align): script_option = '-s' if script and not module.check_mode: + # TODO: convert run_comand() argument to list! 
command = "%s %s -m %s %s -- %s" % (parted_exec, script_option, align_option, device, script) rc, out, err = module.run_command(command) diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py index 5b2e478b8c..8b29655d37 100644 --- a/plugins/modules/pkgin.py +++ b/plugins/modules/pkgin.py @@ -145,18 +145,18 @@ def query_package(module, name): """ # test whether '-p' (parsable) flag is supported. - rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) + rc, out, err = module.run_command([PKGIN_PATH, "-p", "-v"]) if rc == 0: - pflag = '-p' + pflag = ['-p'] splitchar = ';' else: - pflag = '' + pflag = [] splitchar = ' ' # Use "pkgin search" to find the package. The regular expression will # only match on the complete name. - rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name)) + rc, out, err = module.run_command([PKGIN_PATH] + pflag + ["search", "^%s$" % name]) # rc will not be 0 unless the search was a success if rc == 0: @@ -234,22 +234,19 @@ def format_pkgin_command(module, command, package=None): # an empty string. Some commands (e.g. 'update') will ignore extra # arguments, however this behaviour cannot be relied on for others. 
if package is None: - package = "" + packages = [] + else: + packages = [package] if module.params["force"]: - force = "-F" + force = ["-F"] else: - force = "" - - vars = {"pkgin": PKGIN_PATH, - "command": command, - "package": package, - "force": force} + force = [] if module.check_mode: - return "%(pkgin)s -n %(command)s %(package)s" % vars + return [PKGIN_PATH, "-n", command] + packages else: - return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars + return [PKGIN_PATH, "-y"] + force + [command] + packages def remove_packages(module, packages): diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py index e263b71813..59dafb1eb8 100644 --- a/plugins/modules/portinstall.py +++ b/plugins/modules/portinstall.py @@ -79,12 +79,13 @@ def query_package(module, name): if pkg_info_path: pkgng = False pkg_glob_path = module.get_bin_path('pkg_glob', True) + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True) + pkg_info_path = [pkg_info_path] else: pkgng = True - pkg_info_path = module.get_bin_path('pkg', True) - pkg_info_path = pkg_info_path + " info" - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) + pkg_info_path = [module.get_bin_path('pkg', True), "info"] + rc, out, err = module.run_command(pkg_info_path + [name]) found = rc == 0 @@ -94,10 +95,7 @@ def query_package(module, name): # some package is installed name_without_digits = re.sub('[0-9]', '', name) if name != name_without_digits: - if pkgng: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - else: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) + rc, out, err = module.run_command(pkg_info_path + [name_without_digits]) found = rc == 0 @@ -107,13 +105,13 @@ def query_package(module, name): def matching_packages(module, name): ports_glob_path = module.get_bin_path('ports_glob', True) - 
rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) + rc, out, err = module.run_command([ports_glob_path, name]) # counts the number of packages found occurrences = out.count('\n') if occurrences == 0: name_without_digits = re.sub('[0-9]', '', name) if name != name_without_digits: - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) + rc, out, err = module.run_command([ports_glob_path, name_without_digits]) occurrences = out.count('\n') return occurrences @@ -135,10 +133,12 @@ def remove_packages(module, packages): if not query_package(module, package): continue + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True) if query_package(module, package): name_without_digits = re.sub('[0-9]', '', package) + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(name_without_digits)), use_unsafe_shell=True) @@ -163,13 +163,13 @@ def install_packages(module, packages, use_packages): if not portinstall_path: pkg_path = module.get_bin_path('pkg', False) if pkg_path: - module.run_command("pkg install -y portupgrade") + module.run_command([pkg_path, "install", "-y", "portupgrade"]) portinstall_path = module.get_bin_path('portinstall', True) if use_packages: - portinstall_params = "--use-packages" + portinstall_params = ["--use-packages"] else: - portinstall_params = "" + portinstall_params = [] for package in packages: if query_package(module, package): @@ -178,7 +178,7 @@ def install_packages(module, packages, use_packages): # TODO: check how many match matches = matching_packages(module, package) if matches == 1: - rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) + rc, out, err = module.run_command([portinstall_path, "--batch"] + portinstall_params + [package]) if 
not query_package(module, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) elif matches == 0: diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py index e3d7a15429..9347db1591 100644 --- a/plugins/modules/slackpkg.py +++ b/plugins/modules/slackpkg.py @@ -106,9 +106,8 @@ def remove_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - remove %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "remove", package]) if not module.check_mode and query_package(module, slackpkg_path, package): @@ -132,9 +131,8 @@ def install_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - install %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "install", package]) if not module.check_mode and not query_package(module, slackpkg_path, package): @@ -155,9 +153,8 @@ def upgrade_packages(module, slackpkg_path, packages): for package in packages: if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - upgrade %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package]) if not module.check_mode and not query_package(module, slackpkg_path, package): @@ -174,7 +171,8 @@ def upgrade_packages(module, slackpkg_path, packages): def update_cache(module, slackpkg_path): - rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path)) + rc, out, err = module.run_command( + [slackpkg_path, "-batch=on", "update"]) if rc != 0: module.fail_json(msg="Could not update package cache") diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py index db9902c770..56ded66e62 100644 --- 
a/plugins/modules/svr4pkg.py +++ b/plugins/modules/svr4pkg.py @@ -120,7 +120,7 @@ def package_installed(module, name, category): if category: cmd.append('-c') cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) + rc, out, err = module.run_command(cmd) if rc == 0: return True else: diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py index 28a8ce3145..9ba1b02b30 100644 --- a/plugins/modules/swdepot.py +++ b/plugins/modules/swdepot.py @@ -68,7 +68,6 @@ EXAMPLES = ''' import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote def compare_package(version1, version2): @@ -94,13 +93,13 @@ def compare_package(version1, version2): def query_package(module, name, depot=None): """ Returns whether a package is installed or not and version. """ - cmd_list = '/usr/sbin/swlist -a revision -l product' + cmd_list = ['/usr/sbin/swlist', '-a', 'revision', '-l', 'product'] if depot: - rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)), - use_unsafe_shell=True) - else: - rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True) + cmd_list.extend(['-s', depot]) + cmd_list.append(name) + rc, stdout, stderr = module.run_command(cmd_list) if rc == 0: + stdout = ''.join(line for line in stdout.splitlines(True) if name in line) version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1] else: version = None @@ -112,7 +111,7 @@ def remove_package(module, name): """ Uninstall package if installed. 
""" cmd_remove = '/usr/sbin/swremove' - rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) + rc, stdout, stderr = module.run_command([cmd_remove, name]) if rc == 0: return rc, stdout @@ -123,8 +122,8 @@ def remove_package(module, name): def install_package(module, depot, name): """ Install package if not already installed """ - cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' - rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) + cmd_install = ['/usr/sbin/swinstall', '-x', 'mount_all_filesystems=false'] + rc, stdout, stderr = module.run_command(cmd_install + ["-s", depot, name]) if rc == 0: return rc, stdout else: From ea719649bb87e5ea1515f1a40af2e4748009571a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 1 May 2024 21:30:02 +0200 Subject: [PATCH 058/482] apt_rpm: deprecate behavior of state=present and state=installed (#8285) * Deprecate behavior of state=present and state=installed. * Fix changelog fragment. Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .../fragments/8285-apt_rpm-state-deprecate.yml | 7 +++++++ plugins/modules/apt_rpm.py | 12 ++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 changelogs/fragments/8285-apt_rpm-state-deprecate.yml diff --git a/changelogs/fragments/8285-apt_rpm-state-deprecate.yml b/changelogs/fragments/8285-apt_rpm-state-deprecate.yml new file mode 100644 index 0000000000..19f3415841 --- /dev/null +++ b/changelogs/fragments/8285-apt_rpm-state-deprecate.yml @@ -0,0 +1,7 @@ +deprecated_features: + - "apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated and will change in community.general 11.0.0. + Right now the module will upgrade a package to the latest version if one of these two states is used. 
You should explicitly + use ``state=latest`` if you want this behavior, and switch to ``state=present_not_latest`` if you do not want to upgrade the + package if it is already installed. In community.general 11.0.0 the behavior of ``state=present`` and ``state=installed`` will + change to that of ``state=present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, + https://github.com/ansible-collections/community.general/pull/8285)." diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index 07da307633..3a0b6d805f 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -310,6 +310,18 @@ def main(): module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") p = module.params + if p['state'] in ['installed', 'present']: + module.deprecate( + 'state=%s currently behaves unexpectedly by always upgrading to the latest version if' + ' the package is already installed. This behavior is deprecated and will change in' + ' community.general 11.0.0. You can use state=latest to explicitly request this behavior' + ' or state=present_not_latest to explicitly request the behavior that state=%s will have' + ' in community.general 11.0.0, namely that the package will not be upgraded if it is' + ' already installed.' % (p['state'], p['state']), + version='11.0.0', + collection_name='community.general', + ) + modified = False output = "" From 3eeafecd1fcdb8096ac0b106101beb87ac8c1c4d Mon Sep 17 00:00:00 2001 From: Kit Ham Date: Sat, 4 May 2024 23:25:21 +1000 Subject: [PATCH 059/482] homebrew: Add force_formula parameter to pass --formula to brew command (#8275) * homebrew: Add force_formula parameter to pass --formula to brew command Some formulas have names that are also cask formulas (e.g. docker). When trying to install such a formula, brew prints a warning and returns a non-zero exit code. This causes Ansible to halt and report the failure. 
By allowing the task to set force_formula, we can sidestep this problem. * Add changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/homebrew.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8274-homebrew-force-formula.yml | 2 + plugins/modules/homebrew.py | 37 +++++++++++++++--- .../targets/docker/handlers/main.yml | 11 ++++++ .../integration/targets/docker/tasks/main.yml | 39 +++++++++++++++++++ 4 files changed, 84 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/8274-homebrew-force-formula.yml create mode 100644 tests/integration/targets/docker/handlers/main.yml create mode 100644 tests/integration/targets/docker/tasks/main.yml diff --git a/changelogs/fragments/8274-homebrew-force-formula.yml b/changelogs/fragments/8274-homebrew-force-formula.yml new file mode 100644 index 0000000000..4a9e471f4c --- /dev/null +++ b/changelogs/fragments/8274-homebrew-force-formula.yml @@ -0,0 +1,2 @@ +minor_changes: + - "homebrew - adds ``force_formula`` parameter to disambiguate a formula from a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274)." \ No newline at end of file diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 5d471797a7..144d73a5a6 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -76,6 +76,13 @@ options: type: list elements: str version_added: '0.2.0' + force_formula: + description: + - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). + - To install a cask, use the M(community.general.homebrew_cask) module. + type: bool + default: false + version_added: 9.0.0 notes: - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option. 
@@ -141,6 +148,12 @@ EXAMPLES = ''' community.general.homebrew: upgrade_all: true upgrade_options: ignore-pinned + +- name: Force installing a formula whose name is also a cask name + community.general.homebrew: + name: ambiguous_formula + state: present + force_formula: true ''' RETURN = ''' @@ -404,7 +417,8 @@ class Homebrew(object): def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, - install_options=None, upgrade_options=None): + install_options=None, upgrade_options=None, + force_formula=False): if not install_options: install_options = list() if not upgrade_options: @@ -414,7 +428,8 @@ class Homebrew(object): state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options,) + upgrade_options=upgrade_options, + force_formula=force_formula) self._prep() @@ -487,6 +502,8 @@ class Homebrew(object): "--json=v2", self.current_package, ] + if self.force_formula: + cmd.append("--formula") rc, out, err = self.module.run_command(cmd) if err: self.failed = True @@ -632,10 +649,15 @@ class Homebrew(object): else: head = None + if self.force_formula: + formula = '--formula' + else: + formula = None + opts = ( [self.brew_path, 'install'] + self.install_options - + [self.current_package, head] + + [self.current_package, head, formula] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -919,7 +941,11 @@ def main(): default=None, type='list', elements='str', - ) + ), + force_formula=dict( + default=False, + type='bool', + ), ), supports_check_mode=True, ) @@ -951,6 +977,7 @@ def main(): if state in ('absent', 'removed', 'uninstalled'): state = 'absent' + force_formula = p['force_formula'] update_homebrew = p['update_homebrew'] if not update_homebrew: module.run_command_environ_update.update( @@ -967,7 +994,7 @@ def main(): brew = Homebrew(module=module, path=path, packages=packages, state=state, 
update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options) + upgrade_options=upgrade_options, force_formula=force_formula) (failed, changed, message) = brew.run() changed_pkgs = brew.changed_pkgs unchanged_pkgs = brew.unchanged_pkgs diff --git a/tests/integration/targets/docker/handlers/main.yml b/tests/integration/targets/docker/handlers/main.yml new file mode 100644 index 0000000000..90a2e8017d --- /dev/null +++ b/tests/integration/targets/docker/handlers/main.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: uninstall docker + community.general.homebrew: + name: docker + state: absent + become: true + become_user: "{{ brew_stat.stat.pw_name }}" diff --git a/tests/integration/targets/docker/tasks/main.yml b/tests/integration/targets/docker/tasks/main.yml new file mode 100644 index 0000000000..fd636247f4 --- /dev/null +++ b/tests/integration/targets/docker/tasks/main.yml @@ -0,0 +1,39 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- when: ansible_facts.distribution == 'MacOSX' + block: + - name: MACOS | Find brew binary + command: which brew + register: brew_which + + - name: MACOS | Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + + - name: MACOS | Install docker without --formula + community.general.homebrew: + name: docker + state: present + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + ignore_errors: true + register: result + + - name: Check that installing docker without --formula raises warning + assert: + that: + - result is failed + + - name: MACOS | Install docker + community.general.homebrew: + name: 
docker + state: present + force_formula: true + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + notify: + - uninstall docker From d75dee3230fcd66f590ac5c17ab67214e6944852 Mon Sep 17 00:00:00 2001 From: John Bond Date: Sat, 4 May 2024 15:26:56 +0200 Subject: [PATCH 060/482] 8281: puppet waitforlock (#8282) puppet waitforlock Add support for the waitforlock[1] puppet argument [1]https://www.puppet.com/docs/puppet/8/configuration#maxwaitforlock Co-authored-by: Felix Fontein --- .../fragments/8281-puppet-waitforlock.yaml | 2 ++ plugins/module_utils/puppet.py | 1 + plugins/modules/puppet.py | 19 +++++++++-- tests/unit/plugins/modules/test_puppet.yaml | 32 +++++++++++++++++++ 4 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8281-puppet-waitforlock.yaml diff --git a/changelogs/fragments/8281-puppet-waitforlock.yaml b/changelogs/fragments/8281-puppet-waitforlock.yaml new file mode 100644 index 0000000000..bd8a820170 --- /dev/null +++ b/changelogs/fragments/8281-puppet-waitforlock.yaml @@ -0,0 +1,2 @@ +minor_changes: + - puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282). diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py index f05b0673f6..e06683b3ee 100644 --- a/plugins/module_utils/puppet.py +++ b/plugins/module_utils/puppet.py @@ -103,6 +103,7 @@ def puppet_runner(module): modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"), _execute=cmd_runner_fmt.as_func(execute_func), summarize=cmd_runner_fmt.as_bool("--summarize"), + waitforlock=cmd_runner_fmt.as_opt_val("--waitforlock"), debug=cmd_runner_fmt.as_bool("--debug"), verbose=cmd_runner_fmt.as_bool("--verbose"), ), diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index b28583fe05..073a083247 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -101,6 +101,12 @@ options: - Whether to print a transaction summary. 
type: bool default: false + waitforlock: + description: + - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. + - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes and V(h) for hours. + type: str + version_added: 9.0.0 verbose: description: - Print extra information. @@ -159,6 +165,14 @@ EXAMPLES = r''' skip_tags: - service +- name: Wait 30 seconds for any current puppet runs to finish + community.general.puppet: + waitforlock: 30 + +- name: Wait 5 minutes for any current puppet runs to finish + community.general.puppet: + waitforlock: 5m + - name: Run puppet agent in noop mode community.general.puppet: noop: true @@ -214,6 +228,7 @@ def main(): skip_tags=dict(type='list', elements='str'), execute=dict(type='str'), summarize=dict(type='bool', default=False), + waitforlock=dict(type='str'), debug=dict(type='bool', default=False), verbose=dict(type='bool', default=False), use_srv_records=dict(type='bool'), @@ -247,11 +262,11 @@ def main(): runner = puppet_utils.puppet_runner(module) if not p['manifest'] and not p['execute']: - args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records" + args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records waitforlock" with runner(args_order) as ctx: rc, stdout, stderr = ctx.run() else: - args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose" + args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose waitforlock" with runner(args_order) as ctx: rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) diff --git a/tests/unit/plugins/modules/test_puppet.yaml b/tests/unit/plugins/modules/test_puppet.yaml index 308be97975..7909403cfb 100644 --- 
a/tests/unit/plugins/modules/test_puppet.yaml +++ b/tests/unit/plugins/modules/test_puppet.yaml @@ -190,3 +190,35 @@ rc: 0 out: "" err: "" +- id: puppet_agent_waitforlock + input: + waitforlock: 30 + output: + changed: false + run_command_calls: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: "blah, anything" + err: "" + - command: + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --waitforlock + - "30" + environ: *env-def + rc: 0 + out: "" + err: "" From 4bd68ac1535e8edf394acc9285c4e1fb8e0794a3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 5 May 2024 20:32:11 +0200 Subject: [PATCH 061/482] Disable cpanm tests for RHEL 7 and CentOS 7 (#8312) Disable cpanm tests for RHEL 7 and CentOS 7. --- tests/integration/targets/cpanm/tasks/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/cpanm/tasks/main.yml b/tests/integration/targets/cpanm/tasks/main.yml index c9adc1ca6b..89650154f2 100644 --- a/tests/integration/targets/cpanm/tasks/main.yml +++ b/tests/integration/targets/cpanm/tasks/main.yml @@ -6,7 +6,8 @@ - name: bail out for non-supported platforms meta: end_play when: - - (ansible_os_family != "RedHat" or ansible_distribution_major_version|int < 7) + - (ansible_os_family != "RedHat" or ansible_distribution_major_version|int < 8) # TODO: bump back to 7 + - (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int < 8) # TODO: remove - ansible_os_family != "Debian" - name: install perl development package for Red Hat family From bc609d74a023e3ea3b2c5621c1ea8de2b5d3ee17 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 5 May 2024 20:32:22 +0200 Subject: [PATCH 062/482] Disable ejabberd tests on Arch Linux (#8313) Disable ejabberd tests on Arch Linux. 
--- tests/integration/targets/ejabberd_user/tasks/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/ejabberd_user/tasks/main.yml b/tests/integration/targets/ejabberd_user/tasks/main.yml index 33e07b785a..349b3f952f 100644 --- a/tests/integration/targets/ejabberd_user/tasks/main.yml +++ b/tests/integration/targets/ejabberd_user/tasks/main.yml @@ -10,7 +10,8 @@ - name: Bail out if not supported ansible.builtin.meta: end_play - when: ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora') + # TODO: remove Archlinux from the list + when: ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora', 'Archlinux') - name: Remove ejabberd From feb443d260ca5fa24f870c483e33f71c4cc18398 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 7 May 2024 07:50:26 +0200 Subject: [PATCH 063/482] Fix django_manage tests (#8325) Ensure that manage.py is executable. --- tests/integration/targets/django_manage/tasks/main.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/targets/django_manage/tasks/main.yaml b/tests/integration/targets/django_manage/tasks/main.yaml index c07b538938..5307fb6642 100644 --- a/tests/integration/targets/django_manage/tasks/main.yaml +++ b/tests/integration/targets/django_manage/tasks/main.yaml @@ -43,6 +43,11 @@ chdir: "{{ tmp_django_root.path }}/startproj" cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startapp app1" +- name: Make manage.py executable + file: + path: "{{ tmp_django_root.path }}/startproj/test_django_manage_1/manage.py" + mode: "0755" + - name: Check community.general.django_manage: project_path: "{{ tmp_django_root.path }}/startproj/test_django_manage_1" From cb985b31f91be2619ee472b8e8ece57fa03c5965 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20GATELLIER?= <26511053+lgatellier@users.noreply.github.com> Date: Fri, 10 May 2024 15:15:18 +0200 Subject: [PATCH 064/482] docs(gitlab_runner): improve docs and add examples 
(#8310) --- plugins/modules/gitlab_runner.py | 53 +++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index e6163a6b6c..96b3eb3fa4 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -15,17 +15,20 @@ DOCUMENTATION = ''' module: gitlab_runner short_description: Create, modify and delete GitLab Runners description: - - Register, update and delete runners with the GitLab API. + - Register, update and delete runners on GitLab Server side with the GitLab API. - All operations are performed using the GitLab API v4. - - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html). + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) + and U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at U(https://$GITLAB_URL/profile/personal_access_tokens). - A valid registration token is required for registering a new runner. To create shared runners, you need to ask your administrator to give you this token. It can be found at U(https://$GITLAB_URL/admin/runners/). + - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through its API. + Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command notes: - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required. - - Runners need to have unique descriptions. 
+ - Runners need to have unique descriptions, since this attribute is used as key for idempotency author: - Samy Coenen (@SamyCoenen) - Guillaume Martinez (@Lunik) @@ -153,7 +156,45 @@ options: ''' EXAMPLES = ''' -- name: "Register runner" +- name: Create an instance-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a group-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + group: top-level-group/subgroup + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a project-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + project: top-level-group/subgroup/project + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Register instance-level runner with registration token (deprecated)" community.general.gitlab_runner: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" @@ -164,6 +205,7 @@ EXAMPLES = ''' tag_list: ['docker'] run_untagged: false locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task - name: "Delete runner" community.general.gitlab_runner: @@ -180,7 +222,7 @@ EXAMPLES = ''' owned: true state: absent -- name: Register runner for a specific project +- name: 
"Register a project-level runner with registration token (deprecated)" community.general.gitlab_runner: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" @@ -188,6 +230,7 @@ EXAMPLES = ''' description: MyProject runner state: present project: mygroup/mysubgroup/myproject + register: runner # Register module output to run C(gitlab-runner register) command in another task ''' RETURN = ''' From bc7ad0f0ea6e6a7382488e99d0a54ea84324269f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 10 May 2024 16:07:32 +0200 Subject: [PATCH 065/482] CONTRIBUTING.md: update link for changelog fragments, and add more text on them (#8322) Update link for changelog fragments, and add more text on them. --- CONTRIBUTING.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 199e90c5b1..5363b4daca 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,7 +31,9 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which * Try committing your changes with an informative but short commit message. * Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout. -* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments). 
(You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) +* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-a-changelog-fragment). + * You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. (If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) + * Please always include a link to the pull request itself, and if the PR is about an issue, also a link to the issue. Also make sure the fragment ends with a period, and begins with a lower-case letter after `-`. (Again, if you don't do this, we'll add suggestions to fix it, so don't worry too much :) ) * Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed. You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). 
From 136419c5c0bb7d2822ca9ced26d7a37d0e1861be Mon Sep 17 00:00:00 2001 From: Alexander Petrenz Date: Sat, 11 May 2024 16:51:51 +0200 Subject: [PATCH 066/482] bug(lookup/merge_variables): Fix rendering foreign variables (#8303) * manually prepare variables of foreign host including hostvars property * render variables from context of current host * add integration test for cross host merge * lint fixes * adjust cross host merge unit tests to provide a tiny bit of the HostVars Class API * add license information * lint * add changelog fragment * Update tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml Okay Co-authored-by: Mark <40321020+m-a-r-k-e@users.noreply.github.com> * Update tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml Okay Co-authored-by: Mark <40321020+m-a-r-k-e@users.noreply.github.com> * Update tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml Okay Co-authored-by: Mark <40321020+m-a-r-k-e@users.noreply.github.com> * rename _HostVars to HostVarsMock * removing unnecessary task --------- Co-authored-by: Gitlab CI Co-authored-by: Mark <40321020+m-a-r-k-e@users.noreply.github.com> --- .../8303-fix-rendering-foreign-variables.yaml | 2 + plugins/lookup/merge_variables.py | 7 +- .../targets/lookup_merge_variables/runme.sh | 3 + .../test_cross_host_merge_inventory.yml | 33 ++++ .../test_cross_host_merge_play.yml | 21 ++ .../plugins/lookup/test_merge_variables.py | 182 ++++++++++-------- 6 files changed, 165 insertions(+), 83 deletions(-) create mode 100644 changelogs/fragments/8303-fix-rendering-foreign-variables.yaml create mode 100644 tests/integration/targets/lookup_merge_variables/test_cross_host_merge_inventory.yml create mode 100644 tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml diff --git a/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml b/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml new file mode 
100644 index 0000000000..c2162771f2 --- /dev/null +++ b/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml @@ -0,0 +1,2 @@ +bugfixes: + - "merge_variables lookup plugin - fixing cross host merge: providing access to foreign hosts variables to the perspective of the host that is performing the merge (https://github.com/ansible-collections/community.general/pull/8303)." diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index 4fc33014c0..ce7621ad23 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -157,7 +157,9 @@ class LookupModule(LookupBase): cross_host_merge_result = initial_value for host in variables["hostvars"]: if self._is_host_in_allowed_groups(variables["hostvars"][host]["group_names"]): - cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, variables["hostvars"][host]) + host_variables = dict(variables["hostvars"].raw_get(host)) + host_variables["hostvars"] = variables["hostvars"] # re-add hostvars + cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, host_variables) ret.append(cross_host_merge_result) return ret @@ -195,7 +197,8 @@ class LookupModule(LookupBase): result = initial_value for var_name in var_merge_names: - var_value = self._templar.template(variables[var_name]) # Render jinja2 templates + with self._templar.set_temporary_context(available_variables=variables): # tmp. 
switch renderer to context of current variables + var_value = self._templar.template(variables[var_name]) # Render jinja2 templates var_type = _verify_and_get_type(var_value) if prev_var_type is None: diff --git a/tests/integration/targets/lookup_merge_variables/runme.sh b/tests/integration/targets/lookup_merge_variables/runme.sh index 4e66476be4..ada6908dd7 100755 --- a/tests/integration/targets/lookup_merge_variables/runme.sh +++ b/tests/integration/targets/lookup_merge_variables/runme.sh @@ -14,3 +14,6 @@ ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE=suffix \ ANSIBLE_LOG_PATH=/tmp/ansible-test-merge-variables \ ansible-playbook -i test_inventory_all_hosts.yml test_all_hosts.yml "$@" + +ANSIBLE_LOG_PATH=/tmp/ansible-test-merge-variables \ + ansible-playbook -i test_cross_host_merge_inventory.yml test_cross_host_merge_play.yml "$@" diff --git a/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_inventory.yml b/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_inventory.yml new file mode 100644 index 0000000000..938457023e --- /dev/null +++ b/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_inventory.yml @@ -0,0 +1,33 @@ +--- +# Copyright (c) 2020, Thales Netherlands +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +common: + vars: + provider_instances: + servicedata1: + host: "{{ hostvars[groups['provider'] | first].inventory_hostname }}" + user: usr + pass: pwd + servicedata2: + host: down + user: usr2 + pass: pwd2 + hosts: + host1: + host2: + +consumer: + vars: + service_data: "{{ provider_instances.servicedata1 }}" + merge2__1: "{{ service_data }}" # service_data is a variable only known to host2, so normally it´s not available for host1 that is performing the merge + hosts: + host2: + +provider: + vars: + merge_result: "{{ 
lookup('community.general.merge_variables', 'merge2__', pattern_type='prefix', groups=['consumer']) }}" + hosts: + host1: diff --git a/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml b/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml new file mode 100644 index 0000000000..51cd6f1ba3 --- /dev/null +++ b/tests/integration/targets/lookup_merge_variables/test_cross_host_merge_play.yml @@ -0,0 +1,21 @@ +--- +# Copyright (c) 2020, Thales Netherlands +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test merge_variables lookup plugin (merging host reference variables) + hosts: host1 + connection: local + gather_facts: false + tasks: + - name: Print merge result + ansible.builtin.debug: + msg: "{{ merge_result }}" + - name: Validate merge result + ansible.builtin.assert: + that: + - "merge_result | length == 3" + - "merge_result.host == 'host1'" + - "merge_result.user == 'usr'" + - "merge_result.pass == 'pwd'" diff --git a/tests/unit/plugins/lookup/test_merge_variables.py b/tests/unit/plugins/lookup/test_merge_variables.py index 66cb2f08bb..ba8209439a 100644 --- a/tests/unit/plugins/lookup/test_merge_variables.py +++ b/tests/unit/plugins/lookup/test_merge_variables.py @@ -18,6 +18,17 @@ from ansible_collections.community.general.plugins.lookup import merge_variables class TestMergeVariablesLookup(unittest.TestCase): + class HostVarsMock(dict): + + def __getattr__(self, item): + return super().__getitem__(item) + + def __setattr__(self, item, value): + return super().__setitem__(item, value) + + def raw_get(self, host): + return super().__getitem__(host) + def setUp(self): self.loader = DictDataLoader({}) self.templar = Templar(loader=self.loader, variables={}) @@ -141,25 +152,28 @@ class TestMergeVariablesLookup(unittest.TestCase): {'var': [{'item5': 
'value5', 'item6': 'value6'}]}, ]) def test_merge_dict_group_all(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_var'], { - 'inventory_hostname': 'host1', - 'hostvars': { - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } + hostvars = self.HostVarsMock({ + 'host1': { + 'group_names': ['dummy1'], + 'inventory_hostname': 'host1', + '1testlist__merge_var': { + 'var': [{'item1': 'value1', 'item2': 'value2'}] + } + }, + 'host2': { + 'group_names': ['dummy1'], + 'inventory_hostname': 'host2', + '2otherlist__merge_var': { + 'var': [{'item5': 'value5', 'item6': 'value6'}] } } }) + variables = { + 'inventory_hostname': 'host1', + 'hostvars': hostvars + } + + results = self.merge_vars_lookup.run(['__merge_var'], variables) self.assertEqual(results, [ {'var': [ @@ -175,32 +189,35 @@ class TestMergeVariablesLookup(unittest.TestCase): {'var': [{'item5': 'value5', 'item6': 'value6'}]}, ]) def test_merge_dict_group_single(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_var'], { - 'inventory_hostname': 'host1', - 'hostvars': { - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } - }, - 'host3': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': { - 'var': [{'item3': 'value3', 'item4': 'value4'}] - } + hostvars = self.HostVarsMock({ + 'host1': { + 'group_names': ['dummy1'], + 'inventory_hostname': 
'host1', + '1testlist__merge_var': { + 'var': [{'item1': 'value1', 'item2': 'value2'}] + } + }, + 'host2': { + 'group_names': ['dummy1'], + 'inventory_hostname': 'host2', + '2otherlist__merge_var': { + 'var': [{'item5': 'value5', 'item6': 'value6'}] + } + }, + 'host3': { + 'group_names': ['dummy2'], + 'inventory_hostname': 'host3', + '3otherlist__merge_var': { + 'var': [{'item3': 'value3', 'item4': 'value4'}] } } }) + variables = { + 'inventory_hostname': 'host1', + 'hostvars': hostvars + } + + results = self.merge_vars_lookup.run(['__merge_var'], variables) self.assertEqual(results, [ {'var': [ @@ -216,32 +233,34 @@ class TestMergeVariablesLookup(unittest.TestCase): {'var': [{'item5': 'value5', 'item6': 'value6'}]}, ]) def test_merge_dict_group_multiple(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_var'], { - 'inventory_hostname': 'host1', - 'hostvars': { - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } - }, - 'host3': { - 'group_names': ['dummy3'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': { - 'var': [{'item3': 'value3', 'item4': 'value4'}] - } + hostvars = self.HostVarsMock({ + 'host1': { + 'group_names': ['dummy1'], + 'inventory_hostname': 'host1', + '1testlist__merge_var': { + 'var': [{'item1': 'value1', 'item2': 'value2'}] + } + }, + 'host2': { + 'group_names': ['dummy2'], + 'inventory_hostname': 'host2', + '2otherlist__merge_var': { + 'var': [{'item5': 'value5', 'item6': 'value6'}] + } + }, + 'host3': { + 'group_names': ['dummy3'], + 'inventory_hostname': 'host3', + '3otherlist__merge_var': { + 'var': [{'item3': 'value3', 'item4': 'value4'}] } } }) + variables = { + 'inventory_hostname': 'host1', + 'hostvars': 
hostvars + } + results = self.merge_vars_lookup.run(['__merge_var'], variables) self.assertEqual(results, [ {'var': [ @@ -257,26 +276,27 @@ class TestMergeVariablesLookup(unittest.TestCase): ['item5'], ]) def test_merge_list_group_multiple(self, mock_set_options, mock_get_option, mock_template): - print() - results = self.merge_vars_lookup.run(['__merge_var'], { - 'inventory_hostname': 'host1', - 'hostvars': { - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': ['item1'] - }, - 'host2': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': ['item5'] - }, - 'host3': { - 'group_names': ['dummy3'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': ['item3'] - } + hostvars = self.HostVarsMock({ + 'host1': { + 'group_names': ['dummy1'], + 'inventory_hostname': 'host1', + '1testlist__merge_var': ['item1'] + }, + 'host2': { + 'group_names': ['dummy2'], + 'inventory_hostname': 'host2', + '2otherlist__merge_var': ['item5'] + }, + 'host3': { + 'group_names': ['dummy3'], + 'inventory_hostname': 'host3', + '3otherlist__merge_var': ['item3'] } }) + variables = { + 'inventory_hostname': 'host1', + 'hostvars': hostvars + } + results = self.merge_vars_lookup.run(['__merge_var'], variables) self.assertEqual(results, [['item1', 'item5']]) From 3b7f13c58e763d6c62cbdc8a265ab6afbe7724bc Mon Sep 17 00:00:00 2001 From: Kit Ham Date: Sun, 12 May 2024 00:52:43 +1000 Subject: [PATCH 067/482] homebrew: Move repeated logic from homebrew modules into module_utils (#8324) * gomebrew: Move repeated logic from homebrew modules into module_utils Fixes #8323. 
* ghangelog + unit test improvement * Update changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...3-refactor-homebrew-logic-module-utils.yml | 2 + plugins/module_utils/homebrew.py | 115 +++++++++++++++++ plugins/modules/homebrew.py | 116 ++---------------- plugins/modules/homebrew_cask.py | 70 +---------- tests/unit/plugins/modules/test_homebrew.py | 19 +-- 5 files changed, 145 insertions(+), 177 deletions(-) create mode 100644 changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml create mode 100644 plugins/module_utils/homebrew.py diff --git a/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml b/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml new file mode 100644 index 0000000000..d29aed5ae4 --- /dev/null +++ b/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml @@ -0,0 +1,2 @@ +minor_changes: + - "homebrew, homebrew_cask - refactor common argument validation logic into a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, https://github.com/ansible-collections/community.general/pull/8324)." 
\ No newline at end of file diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py new file mode 100644 index 0000000000..2816832109 --- /dev/null +++ b/plugins/module_utils/homebrew.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import os +import re +from ansible.module_utils.six import string_types + + +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = filter(None, (line.split("#")[0].strip() for line in lines)) + group = r"[^" + r"".join(chars) + r"]" + return re.compile(group) + + +class HomebrewValidate(object): + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . # dots + \- # dashes + """.format( + sep=os.path.sep + ) + + VALID_BREW_PATH_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + \- # dashes + """.format( + sep=os.path.sep + ) + + VALID_PACKAGE_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + \+ # plusses + \- # dashes + : # colons (for URLs) + @ # at-sign + """ + + INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + """ + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + """ + + if isinstance(path, string_types): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + """ + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + """ + + if brew_path is None: + return True + + return isinstance( + brew_path, string_types + ) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + + @classmethod + def valid_package(cls, package): + """A valid package is either None or alphanumeric.""" + + if package is None: + return True + + return isinstance( + package, string_types + ) and not cls.INVALID_PACKAGE_REGEX.search(package) diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 144d73a5a6..388682d924 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -179,9 +179,10 @@ changed_pkgs: ''' import json -import os.path import re +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate + from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems, string_types @@ 
-208,98 +209,7 @@ def _check_package_in_json(json_output, package_type): class Homebrew(object): '''A class to manage Homebrew packages.''' - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . # dots - / # slash (for taps) - \+ # plusses - \- # dashes - : # colons (for URLs) - @ # at-sign - ''' - - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, string_types): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - 
def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, string_types) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - @classmethod def valid_state(cls, state): ''' @@ -359,7 +269,7 @@ class Homebrew(object): @path.setter def path(self, path): - if not self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) @@ -379,7 +289,7 @@ class Homebrew(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) @@ -404,7 +314,7 @@ class Homebrew(object): @current_package.setter def current_package(self, package): - if not self.valid_package(package): + if not HomebrewValidate.valid_package(package): self._current_package = None self.failed = True self.message = 'Invalid package: {0}.'.format(package) @@ -491,7 +401,7 @@ class Homebrew(object): # checks ------------------------------------------------------- {{{ def _current_package_is_installed(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) @@ -514,7 +424,7 @@ class Homebrew(object): return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks") def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): return False rc, out, err = self.module.run_command([ @@ -526,7 +436,7 @@ class Homebrew(object): return rc != 0 def _current_package_is_installed_from_head(self): - if not 
Homebrew.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): return False elif not self._current_package_is_installed(): return False @@ -624,7 +534,7 @@ class Homebrew(object): # installed ------------------------------ {{{ def _install_current_package(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) @@ -685,7 +595,7 @@ class Homebrew(object): def _upgrade_current_package(self): command = 'upgrade' - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) @@ -756,7 +666,7 @@ class Homebrew(object): # uninstalled ---------------------------- {{{ def _uninstall_current_package(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) @@ -805,7 +715,7 @@ class Homebrew(object): # linked --------------------------------- {{{ def _link_current_package(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) @@ -852,7 +762,7 @@ class Homebrew(object): # unlinked ------------------------------- {{{ def _unlink_current_package(self): - if not self.valid_package(self.current_package): + if not HomebrewValidate.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) diff --git 
a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index c992693b68..dc9aea5db8 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -158,6 +158,7 @@ import re import tempfile from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule @@ -183,23 +184,6 @@ class HomebrewCask(object): '''A class to manage Homebrew casks.''' # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - VALID_CASK_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . 
# dots @@ -208,58 +192,10 @@ class HomebrewCask(object): @ # at symbol ''' - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) # /class regexes ----------------------------------------------- }}} # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, (string_types)): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - @classmethod def valid_cask(cls, cask): '''A valid cask is either None or alphanumeric + backslashes.''' @@ -321,7 +257,7 @@ class HomebrewCask(object): @path.setter def path(self, path): - if not self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) @@ -341,7 +277,7 @@ class HomebrewCask(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) diff --git a/tests/unit/plugins/modules/test_homebrew.py b/tests/unit/plugins/modules/test_homebrew.py index 
f849b433df..d04ca4de58 100644 --- a/tests/unit/plugins/modules/test_homebrew.py +++ b/tests/unit/plugins/modules/test_homebrew.py @@ -2,23 +2,28 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.plugins.modules.homebrew import Homebrew +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate class TestHomebrewModule(unittest.TestCase): def setUp(self): - self.brew_app_names = [ - 'git-ssh', - 'awscli@1', - 'bash' + self.brew_app_names = ["git-ssh", "awscli@1", "bash"] + + self.invalid_names = [ + "git ssh", + "git*", ] def test_valid_package_names(self): for name in self.brew_app_names: - self.assertTrue(Homebrew.valid_package(name)) + self.assertTrue(HomebrewValidate.valid_package(name)) + + def test_invalid_package_names(self): + for name in self.invalid_names: + self.assertFalse(HomebrewValidate.valid_package(name)) From d347bf5fa007e740630c303a361c2e0c4973e4cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sat, 11 May 2024 16:53:44 +0200 Subject: [PATCH 068/482] add systemd run0 as a become method (#8306) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add systemd run0 as a become method Signed-off-by: Thomas Sjögren * add fragment Signed-off-by: Thomas Sjögren * remove space after hyphen Signed-off-by: Thomas Sjögren * replace ansible with collection version Signed-off-by: Thomas Sjögren * update version_added and remove changelog fragment Signed-off-by: Thomas Sjögren * update formating Signed-off-by: Thomas Sjögren * add types Signed-off-by: Thomas Sjögren * slim super() 
Signed-off-by: Thomas Sjögren * imports must appear below docs Signed-off-by: Thomas Sjögren * add initial unit test Signed-off-by: Thomas Sjögren * update unit tests Signed-off-by: Thomas Sjögren --------- Signed-off-by: Thomas Sjögren --- .github/BOTMETA.yml | 2 + plugins/become/run0.py | 128 +++++++++++++++++++++++++ tests/unit/plugins/become/test_run0.py | 64 +++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 plugins/become/run0.py create mode 100644 tests/unit/plugins/become/test_run0.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 41a4824d26..60d68a2833 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -33,6 +33,8 @@ files: maintainers: $team_ansible_core $becomes/pmrun.py: maintainers: $team_ansible_core + $becomes/run0.py: + maintainers: konstruktoid $becomes/sesu.py: maintainers: nekonyuu $becomes/sudosu.py: diff --git a/plugins/become/run0.py b/plugins/become/run0.py new file mode 100644 index 0000000000..1d6d7cb754 --- /dev/null +++ b/plugins/become/run0.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ + name: run0 + short_description: Systemd's run0 + description: + - This become plugins allows your remote/login user to execute commands as another user via the C(run0) utility. + author: + - Thomas Sjögren (@konstruktoid) + version_added: '9.0.0' + options: + become_user: + description: User you 'become' to execute the task. 
+ default: root + ini: + - section: privilege_escalation + key: become_user + - section: run0_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_run0_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_RUN0_USER + type: string + become_exe: + description: The C(run0) executable. + default: run0 + ini: + - section: privilege_escalation + key: become_exe + - section: run0_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_run0_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_RUN0_EXE + type: string + become_flags: + description: Options to pass to run0. + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: run0_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_run0_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_RUN0_FLAGS + type: string + notes: + - This plugin will only work when a polkit rule is in place. +""" + +EXAMPLES = r""" +# An example polkit rule that allows the user 'ansible' in the 'wheel' group +# to execute commands using run0 without authentication. 
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: | + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.systemd1.manage-units" && + subject.isInGroup("wheel") + subject.user == "ansible") { + return polkit.Result.YES; + } + }); +""" + +from re import compile as re_compile + +from ansible.plugins.become import BecomeBase +from ansible.module_utils._text import to_bytes + +ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m")) + + +class BecomeModule(BecomeBase): + + name = "community.general.run0" + + prompt = "Password: " + fail = ("==== AUTHENTICATION FAILED ====",) + success = ("==== AUTHENTICATION COMPLETE ====",) + require_tty = ( + True # see https://github.com/ansible-collections/community.general/issues/6932 + ) + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + + def build_become_command(self, cmd, shell): + super().build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option("become_exe") + flags = self.get_option("become_flags") + user = self.get_option("become_user") + + return ( + f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}" + ) + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git a/tests/unit/plugins/become/test_run0.py b/tests/unit/plugins/become/test_run0.py new file mode 100644 index 0000000000..7507c556e8 --- /dev/null +++ b/tests/unit/plugins/become/test_run0.py @@ -0,0 +1,64 @@ +# Copyright (c) 2024 Ansible Project +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re + +from ansible import context + +from .helper import call_become_plugin + + +def test_run0_basic(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + + default_cmd = "/bin/foo" + default_exe = "/bin/sh" + run0_exe = "run0" + + success = "BECOME-SUCCESS-.+?" + + task = { + "become_method": "community.general.run0", + } + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + assert ( + re.match( + f"{run0_exe} --user=root {default_exe} -c 'echo {success}; {default_cmd}'", + cmd, + ) + is not None + ) + + +def test_run0_flags(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + + default_cmd = "/bin/foo" + default_exe = "/bin/sh" + run0_exe = "run0" + run0_flags = "--nice=15" + + success = "BECOME-SUCCESS-.+?" 
+ + task = { + "become_method": "community.general.run0", + "become_flags": run0_flags, + } + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + assert ( + re.match( + f"{run0_exe} --user=root --nice=15 {default_exe} -c 'echo {success}; {default_cmd}'", + cmd, + ) + is not None + ) From a71e19130d041752aa393be85c22adcb1ad63e7a Mon Sep 17 00:00:00 2001 From: Eike Waldt Date: Sat, 11 May 2024 16:54:23 +0200 Subject: [PATCH 069/482] keycloak_user_federation: fix diff of empty `krbPrincipalAttribute` (#8320) keycloak_user_federation: fix diff of empty `krbPrincipalAttribute` (#8320) --- ...eycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml | 2 ++ plugins/modules/keycloak_user_federation.py | 3 +++ 2 files changed, 5 insertions(+) create mode 100644 changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml diff --git a/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml b/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml new file mode 100644 index 0000000000..df4a892733 --- /dev/null +++ b/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320). 
diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index fee0d1265c..f87ef936ce 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -719,6 +719,9 @@ def sanitize(comp): compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) if 'bindCredential' in compcopy['config']: compcopy['config']['bindCredential'] = '**********' + # an empty string is valid for krbPrincipalAttribute but is filtered out in diff + if 'krbPrincipalAttribute' not in compcopy['config']: + compcopy['config']['krbPrincipalAttribute'] = '' if 'mappers' in compcopy: for mapper in compcopy['mappers']: if 'config' in mapper: From b774435d8d0a47783e6f32a052910e89bc412654 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 11 May 2024 21:29:37 +0200 Subject: [PATCH 070/482] Pass codecov token to ansible-test-gh-action (#8341) Pass codecov token to ansible-test-gh-action. --- .github/workflows/ansible-test.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index ecfc365655..e57213e9fa 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -42,6 +42,7 @@ jobs: uses: felixfontein/ansible-test-gh-action@main with: ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} pull-request-change-detection: 'true' testing-type: sanity @@ -83,6 +84,7 @@ jobs: uses: felixfontein/ansible-test-gh-action@main with: ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} pre-test-cmd: >- mkdir -p ../../ansible @@ -183,6 +185,7 @@ jobs: uses: felixfontein/ansible-test-gh-action@main with: ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: 
${{ secrets.CODECOV_TOKEN }} coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} docker-image: ${{ matrix.docker }} integration-continue-on-error: 'false' From 7aa118b957f8060d0d9cca3c80c1fb2c1c8ebeb0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 12 May 2024 00:36:12 +0200 Subject: [PATCH 071/482] Add test for unsafe plugin util (#8345) Add test for unsafe plugin util. --- .../unit/plugins/plugin_utils/test_unsafe.py | 133 ++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 tests/unit/plugins/plugin_utils/test_unsafe.py diff --git a/tests/unit/plugins/plugin_utils/test_unsafe.py b/tests/unit/plugins/plugin_utils/test_unsafe.py new file mode 100644 index 0000000000..3f35ee9337 --- /dev/null +++ b/tests/unit/plugins/plugin_utils/test_unsafe.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +import pytest + +from ansible.utils.unsafe_proxy import AnsibleUnsafe + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import ( + make_unsafe, +) + + +TEST_MAKE_UNSAFE = [ + ( + u'text', + [], + [ + (), + ], + ), + ( + u'{{text}}', + [ + (), + ], + [], + ), + ( + b'text', + [], + [ + (), + ], + ), + ( + b'{{text}}', + [ + (), + ], + [], + ), + ( + { + 'skey': 'value', + 'ukey': '{{value}}', + 1: [ + 'value', + '{{value}}', + { + 1.0: '{{value}}', + 2.0: 'value', + }, + ], + }, + [ + ('ukey', ), + (1, 1), + (1, 2, 1.0), + ], + [ + ('skey', ), + (1, 0), + (1, 2, 2.0), + ], + ), + ( + ['value', '{{value}}'], + [ + (1, ), + ], + [ + (0, ), + ], + ), +] + + +@pytest.mark.parametrize("value, check_unsafe_paths, check_safe_paths", TEST_MAKE_UNSAFE) +def test_make_unsafe(value, check_unsafe_paths, 
check_safe_paths): + unsafe_value = make_unsafe(value) + assert unsafe_value == value + for check_path in check_unsafe_paths: + obj = unsafe_value + for elt in check_path: + obj = obj[elt] + assert isinstance(obj, AnsibleUnsafe) + for check_path in check_safe_paths: + obj = unsafe_value + for elt in check_path: + obj = obj[elt] + assert not isinstance(obj, AnsibleUnsafe) + + +def test_make_unsafe_dict_key(): + value = { + b'test': 1, + u'test': 2, + } + unsafe_value = make_unsafe(value) + assert unsafe_value == value + for obj in unsafe_value: + assert not isinstance(obj, AnsibleUnsafe) + + value = { + b'{{test}}': 1, + u'{{test}}': 2, + } + unsafe_value = make_unsafe(value) + assert unsafe_value == value + for obj in unsafe_value: + assert isinstance(obj, AnsibleUnsafe) + + +def test_make_unsafe_set(): + value = set([b'test', u'test']) + unsafe_value = make_unsafe(value) + assert unsafe_value == value + for obj in unsafe_value: + assert not isinstance(obj, AnsibleUnsafe) + + value = set([b'{{test}}', u'{{test}}']) + unsafe_value = make_unsafe(value) + assert unsafe_value == value + for obj in unsafe_value: + assert isinstance(obj, AnsibleUnsafe) From 4f4075a54287b0886be48b8b5a89bf5b75b4fe88 Mon Sep 17 00:00:00 2001 From: Nils Brinkmann Date: Sun, 12 May 2024 09:21:39 +0200 Subject: [PATCH 072/482] Added parameter to select Content-Type when accessing the Rundeck API (#7684) * Added parameter to select Content-Type when accessing the Rundeck API * Removed autogenerated file * Fixed missing yml extension * Updated changelog text better describe what has happened Co-authored-by: Felix Fontein --------- Co-authored-by: Nils Brinkmann Co-authored-by: Felix Fontein --- .gitignore | 4 ++++ changelogs/fragments/7683-added-contenttype-parameter.yml | 2 ++ plugins/module_utils/rundeck.py | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/7683-added-contenttype-parameter.yml diff --git a/.gitignore b/.gitignore index 
b7868a9e41..cf1f74e41c 100644 --- a/.gitignore +++ b/.gitignore @@ -512,3 +512,7 @@ $RECYCLE.BIN/ # Integration tests cloud configs tests/integration/cloud-config-*.ini + + +# VSCode specific extensions +.vscode/settings.json diff --git a/changelogs/fragments/7683-added-contenttype-parameter.yml b/changelogs/fragments/7683-added-contenttype-parameter.yml new file mode 100644 index 0000000000..52f4b6b0c5 --- /dev/null +++ b/changelogs/fragments/7683-added-contenttype-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684). \ No newline at end of file diff --git a/plugins/module_utils/rundeck.py b/plugins/module_utils/rundeck.py index 7df68a3603..cffca7b4ee 100644 --- a/plugins/module_utils/rundeck.py +++ b/plugins/module_utils/rundeck.py @@ -28,7 +28,7 @@ def api_argument_spec(): return api_argument_spec -def api_request(module, endpoint, data=None, method="GET"): +def api_request(module, endpoint, data=None, method="GET", content_type="application/json"): """Manages Rundeck API requests via HTTP(S) :arg module: The AnsibleModule (used to get url, api_version, api_token, etc). @@ -63,7 +63,7 @@ def api_request(module, endpoint, data=None, method="GET"): data=json.dumps(data), method=method, headers={ - "Content-Type": "application/json", + "Content-Type": content_type, "Accept": "application/json", "X-Rundeck-Auth-Token": module.params["api_token"] } From 7f4f066e860b500a67e0273a8f06556c11f127c8 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 12 May 2024 10:02:06 +0200 Subject: [PATCH 073/482] Move version_added for consul docs fragment to modules (#8350) Move version_added for consul docs fragment to modules. 
--- plugins/doc_fragments/consul.py | 1 - plugins/modules/consul_policy.py | 2 ++ plugins/modules/consul_role.py | 2 ++ plugins/modules/consul_session.py | 2 ++ plugins/modules/consul_token.py | 2 ++ 5 files changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py index fbe3f33d4d..d4cf119958 100644 --- a/plugins/doc_fragments/consul.py +++ b/plugins/doc_fragments/consul.py @@ -56,5 +56,4 @@ attributes: support: full membership: - community.general.consul - version_added: 8.3.0 """ diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py index f020622a0c..2ed6021b03 100644 --- a/plugins/modules/consul_policy.py +++ b/plugins/modules/consul_policy.py @@ -33,6 +33,8 @@ attributes: version_added: 8.3.0 details: - In check mode the diff will miss operational attributes. + action_group: + version_added: 8.3.0 options: state: description: diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py index 0da71507a6..e07e2036fe 100644 --- a/plugins/modules/consul_role.py +++ b/plugins/modules/consul_role.py @@ -32,6 +32,8 @@ attributes: details: - In check mode the diff will miss operational attributes. version_added: 8.3.0 + action_group: + version_added: 8.3.0 options: name: description: diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py index bd03b561a7..87a5f19143 100644 --- a/plugins/modules/consul_session.py +++ b/plugins/modules/consul_session.py @@ -29,6 +29,8 @@ attributes: support: none diff_mode: support: none + action_group: + version_added: 8.3.0 options: id: description: diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py index eee419863f..02bc544da7 100644 --- a/plugins/modules/consul_token.py +++ b/plugins/modules/consul_token.py @@ -31,6 +31,8 @@ attributes: support: partial details: - In check mode the diff will miss operational attributes. 
+ action_group: + version_added: 8.3.0 options: state: description: From 7dd7cbdba8b5ca422fbe826cad63392fe1f50ee7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 12 May 2024 10:03:06 +0200 Subject: [PATCH 074/482] Add proxmox action group (#8334) Add proxmox module defaults group. --- .../fragments/8334-proxmox-action-group.yml | 2 ++ meta/runtime.yml | 17 +++++++++++++++++ plugins/doc_fragments/proxmox.py | 10 ++++++++++ plugins/modules/proxmox.py | 3 +++ plugins/modules/proxmox_disk.py | 3 +++ plugins/modules/proxmox_domain_info.py | 4 ++++ plugins/modules/proxmox_group_info.py | 4 ++++ plugins/modules/proxmox_kvm.py | 3 +++ plugins/modules/proxmox_nic.py | 3 +++ plugins/modules/proxmox_node_info.py | 4 ++++ plugins/modules/proxmox_pool.py | 7 +++++-- plugins/modules/proxmox_pool_member.py | 7 +++++-- plugins/modules/proxmox_snap.py | 7 +++++-- .../modules/proxmox_storage_contents_info.py | 4 ++++ plugins/modules/proxmox_storage_info.py | 4 ++++ plugins/modules/proxmox_tasks_info.py | 10 +++++++--- plugins/modules/proxmox_template.py | 3 +++ plugins/modules/proxmox_user_info.py | 4 ++++ plugins/modules/proxmox_vm_info.py | 10 +++++++--- 19 files changed, 97 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/8334-proxmox-action-group.yml diff --git a/changelogs/fragments/8334-proxmox-action-group.yml b/changelogs/fragments/8334-proxmox-action-group.yml new file mode 100644 index 0000000000..0e5aeeccde --- /dev/null +++ b/changelogs/fragments/8334-proxmox-action-group.yml @@ -0,0 +1,2 @@ +minor_changes: + - "proxmox* modules - there is now a ``community.general.proxmox`` module defaults group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334)." 
diff --git a/meta/runtime.yml b/meta/runtime.yml index 402dfd5fa2..edeb53005f 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -12,6 +12,23 @@ action_groups: - consul_role - consul_session - consul_token + proxmox: + - proxmox + - proxmox_disk + - proxmox_domain_info + - proxmox_group_info + - proxmox_kvm + - proxmox_nic + - proxmox_node_info + - proxmox_pool + - proxmox_pool_member + - proxmox_snap + - proxmox_storage_contents_info + - proxmox_storage_info + - proxmox_tasks_info + - proxmox_template + - proxmox_user_info + - proxmox_vm_info plugin_routing: callback: actionable: diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py index 4972da4985..cb533fefa6 100644 --- a/plugins/doc_fragments/proxmox.py +++ b/plugins/doc_fragments/proxmox.py @@ -65,3 +65,13 @@ options: - Add the new VM to the specified pool. type: str ''' + + ACTIONGROUP_PROXMOX = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.proxmox) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.proxmox +""" diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 742c87c3c1..73afd952e2 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -21,6 +21,8 @@ attributes: support: none diff_mode: support: none + action_group: + version_added: 9.0.0 options: password: description: @@ -216,6 +218,7 @@ author: Sergei Antipov (@UnderGreen) seealso: - module: community.general.proxmox_vm_info extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.proxmox.selection - community.general.attributes diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py index 69a7300dfd..83cdbeee58 100644 --- a/plugins/modules/proxmox_disk.py +++ b/plugins/modules/proxmox_disk.py @@ -21,6 +21,8 @@ attributes: support: none diff_mode: support: none + action_group: + version_added: 9.0.0 options: name: description: @@ -325,6 +327,7 @@ options: - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by V(0x). type: str extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes ''' diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py index 7435695a91..f3ff212bff 100644 --- a/plugins/modules/proxmox_domain_info.py +++ b/plugins/modules/proxmox_domain_info.py @@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE domains version_added: 1.3.0 description: - Retrieve information about one or more Proxmox VE domains. 
+attributes: + action_group: + version_added: 9.0.0 options: domain: description: @@ -24,6 +27,7 @@ options: type: str author: Tristan Le Guern (@tleguern) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py index 531a9dae7a..eda1fe04d8 100644 --- a/plugins/modules/proxmox_group_info.py +++ b/plugins/modules/proxmox_group_info.py @@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE groups version_added: 1.3.0 description: - Retrieve information about one or more Proxmox VE groups +attributes: + action_group: + version_added: 9.0.0 options: group: description: @@ -24,6 +27,7 @@ options: type: str author: Tristan Le Guern (@tleguern) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 8779dcdc1f..253a75d4b3 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -21,6 +21,8 @@ attributes: support: none diff_mode: support: none + action_group: + version_added: 9.0.0 options: archive: description: @@ -579,6 +581,7 @@ options: seealso: - module: community.general.proxmox_vm_info extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.proxmox.selection - community.general.attributes diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py index 9afe494472..6e94ed0bb6 100644 --- a/plugins/modules/proxmox_nic.py +++ b/plugins/modules/proxmox_nic.py @@ -21,6 +21,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 
9.0.0 options: bridge: description: @@ -94,6 +96,7 @@ options: - Specifies the instance ID. type: int extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes ''' diff --git a/plugins/modules/proxmox_node_info.py b/plugins/modules/proxmox_node_info.py index 82ef7aa388..51d8745c05 100644 --- a/plugins/modules/proxmox_node_info.py +++ b/plugins/modules/proxmox_node_info.py @@ -17,7 +17,11 @@ version_added: 8.2.0 description: - Retrieve information about one or more Proxmox VE nodes. author: John Berninger (@jwbernin) +attributes: + action_group: + version_added: 9.0.0 extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_pool.py b/plugins/modules/proxmox_pool.py index 7046320700..5089ec3bef 100644 --- a/plugins/modules/proxmox_pool.py +++ b/plugins/modules/proxmox_pool.py @@ -21,6 +21,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 9.0.0 options: poolid: description: @@ -42,8 +44,9 @@ options: type: str extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.attributes + - community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes """ EXAMPLES = """ diff --git a/plugins/modules/proxmox_pool_member.py b/plugins/modules/proxmox_pool_member.py index 7d6b249493..b26082f975 100644 --- a/plugins/modules/proxmox_pool_member.py +++ b/plugins/modules/proxmox_pool_member.py @@ -20,6 +20,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 9.0.0 options: poolid: description: @@ -48,8 +50,9 @@ options: type: str extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.attributes + - 
community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes """ EXAMPLES = """ diff --git a/plugins/modules/proxmox_snap.py b/plugins/modules/proxmox_snap.py index 4991423c2a..4f7b345b80 100644 --- a/plugins/modules/proxmox_snap.py +++ b/plugins/modules/proxmox_snap.py @@ -21,6 +21,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 9.0.0 options: hostname: description: @@ -89,8 +91,9 @@ notes: requirements: [ "proxmoxer", "requests" ] author: Jeffrey van Pelt (@Thulium-Drake) extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.attributes + - community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes ''' EXAMPLES = r''' diff --git a/plugins/modules/proxmox_storage_contents_info.py b/plugins/modules/proxmox_storage_contents_info.py index 498490fe41..b777870e54 100644 --- a/plugins/modules/proxmox_storage_contents_info.py +++ b/plugins/modules/proxmox_storage_contents_info.py @@ -17,6 +17,9 @@ short_description: List content from a Proxmox VE storage version_added: 8.2.0 description: - Retrieves information about stored objects on a specific storage attached to a node. 
+attributes: + action_group: + version_added: 9.0.0 options: storage: description: @@ -41,6 +44,7 @@ options: type: int author: Julian Vanden Broeck (@l00ptr) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py index 3c29e59cf2..fd5a6ee0d8 100644 --- a/plugins/modules/proxmox_storage_info.py +++ b/plugins/modules/proxmox_storage_info.py @@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE storages version_added: 2.2.0 description: - Retrieve information about one or more Proxmox VE storages. +attributes: + action_group: + version_added: 9.0.0 options: storage: description: @@ -28,6 +31,7 @@ options: type: str author: Tristan Le Guern (@tleguern) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py index d31a04980b..65a07566a8 100644 --- a/plugins/modules/proxmox_tasks_info.py +++ b/plugins/modules/proxmox_tasks_info.py @@ -17,6 +17,9 @@ version_added: 3.8.0 description: - Retrieve information about one or more Proxmox VE tasks. 
author: 'Andreas Botzner (@paginabianca) ' +attributes: + action_group: + version_added: 9.0.0 options: node: description: @@ -29,9 +32,10 @@ options: aliases: ['upid', 'name'] type: str extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module + - community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes + - community.general.attributes.info_module ''' diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py index 615bfc1823..f73109931f 100644 --- a/plugins/modules/proxmox_template.py +++ b/plugins/modules/proxmox_template.py @@ -20,6 +20,8 @@ attributes: support: none diff_mode: support: none + action_group: + version_added: 9.0.0 options: node: description: @@ -69,6 +71,7 @@ notes: - C(proxmoxer) >= 1.2.0 requires C(requests_toolbelt) to upload files larger than 256 MB. author: Sergei Antipov (@UnderGreen) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes ''' diff --git a/plugins/modules/proxmox_user_info.py b/plugins/modules/proxmox_user_info.py index 20154528a6..8680dec7ca 100644 --- a/plugins/modules/proxmox_user_info.py +++ b/plugins/modules/proxmox_user_info.py @@ -16,6 +16,9 @@ short_description: Retrieve information about one or more Proxmox VE users version_added: 1.3.0 description: - Retrieve information about one or more Proxmox VE users +attributes: + action_group: + version_added: 9.0.0 options: domain: description: @@ -33,6 +36,7 @@ options: type: str author: Tristan Le Guern (@tleguern) extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/proxmox_vm_info.py 
b/plugins/modules/proxmox_vm_info.py index 30342b684e..39d8307a43 100644 --- a/plugins/modules/proxmox_vm_info.py +++ b/plugins/modules/proxmox_vm_info.py @@ -17,6 +17,9 @@ version_added: 7.2.0 description: - Retrieve information about one or more Proxmox VE virtual machines. author: 'Sergei Antipov (@UnderGreen) ' +attributes: + action_group: + version_added: 9.0.0 options: node: description: @@ -55,9 +58,10 @@ options: default: none version_added: 8.1.0 extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module + - community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes + - community.general.attributes.info_module """ EXAMPLES = """ From fabf6263f1c5fcf4a0ab35ae5a02bf1cdb93c595 Mon Sep 17 00:00:00 2001 From: Florian Apolloner Date: Wed, 15 May 2024 18:46:12 +0200 Subject: [PATCH 075/482] Fix sanitize for keycloak_identitiy_provider. (#8355) * Fix sanitize for keycloak_identitiy_provider. * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8355-keycloak-idp-sanitize.yaml | 2 ++ plugins/modules/keycloak_identity_provider.py | 2 +- .../targets/keycloak_identity_provider/tasks/main.yml | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8355-keycloak-idp-sanitize.yaml diff --git a/changelogs/fragments/8355-keycloak-idp-sanitize.yaml b/changelogs/fragments/8355-keycloak-idp-sanitize.yaml new file mode 100644 index 0000000000..3a7942bb88 --- /dev/null +++ b/changelogs/fragments/8355-keycloak-idp-sanitize.yaml @@ -0,0 +1,2 @@ +security_fixes: + - keycloak_identity_provider - the client secret was not correctly sanitized by the module. 
The return values ``proposed``, ``existing``, and ``end_state``, as well as the diff, did contain the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355). \ No newline at end of file diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index 588f553e8d..2eca3a06d2 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -437,7 +437,7 @@ def sanitize(idp): idpcopy = deepcopy(idp) if 'config' in idpcopy: if 'clientSecret' in idpcopy['config']: - idpcopy['clientSecret'] = '**********' + idpcopy['config']['clientSecret'] = '**********' return idpcopy diff --git a/tests/integration/targets/keycloak_identity_provider/tasks/main.yml b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml index afad9740ed..fa118ed1d9 100644 --- a/tests/integration/targets/keycloak_identity_provider/tasks/main.yml +++ b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml @@ -62,6 +62,7 @@ - result.existing == {} - result.end_state.alias == "{{ idp }}" - result.end_state.mappers != [] + - result.end_state.config.client_secret = "**********" - name: Update existing identity provider (no change) community.general.keycloak_identity_provider: From 6889e0478d8404250debaca37376185002f0d9d2 Mon Sep 17 00:00:00 2001 From: Wilfried ROSET Date: Wed, 15 May 2024 18:47:05 +0200 Subject: [PATCH 076/482] [opentelemetry][callback] Add support for http exporter (#8321) * [opentelemetry][callback] Add support for http exporter The previous version of the callback was supporting only the grpc exporter. This was counter intuitive as the documentation was mentioning ``. 
Users were left with a error similar to `Transient error StatusCode.UNAVAILABLE encountered while exporting traces to , retrying in 1s.` The following commit fix this situation by support both HTTP and GRPC via the standard environment variables and ansible.cfg See as well https://github.com/ansible-collections/community.general/issues/7888 Signed-off-by: Wilfried Roset * [opentelemetry][callback] Take into account review Signed-off-by: Wilfried Roset --------- Signed-off-by: Wilfried Roset --- .../8321-fix-opentelemetry-callback.yml | 2 + plugins/callback/opentelemetry.py | 41 +++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8321-fix-opentelemetry-callback.yml diff --git a/changelogs/fragments/8321-fix-opentelemetry-callback.yml b/changelogs/fragments/8321-fix-opentelemetry-callback.yml new file mode 100644 index 0000000000..a02f12c6b9 --- /dev/null +++ b/changelogs/fragments/8321-fix-opentelemetry-callback.yml @@ -0,0 +1,2 @@ +minor_changes: + - opentelemetry - add support for HTTP trace_exporter and configures the behavior via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8321). diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 492e420716..c3437b7306 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -84,6 +84,22 @@ DOCUMENTATION = ''' - section: callback_opentelemetry key: disable_attributes_in_logs version_added: 7.1.0 + otel_exporter_otlp_traces_protocol: + type: str + description: + - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the the transport protocol for spans. + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). 
+ default: grpc + choices: + - grpc + - http/protobuf + env: + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + ini: + - section: callback_opentelemetry + key: otel_exporter_otlp_traces_protocol + version_added: 9.0.0 requirements: - opentelemetry-api (Python library) - opentelemetry-exporter-otlp (Python library) @@ -124,7 +140,8 @@ from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace from opentelemetry.trace import SpanKind - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCOTLPSpanExporter + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPOTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator @@ -255,7 +272,15 @@ class OpenTelemetrySource(object): task.dump = dump task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs, disable_attributes_in_logs): + def generate_distributed_traces(self, + otel_service_name, + ansible_playbook, + tasks_data, + status, + traceparent, + disable_logs, + disable_attributes_in_logs, + otel_exporter_otlp_traces_protocol): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -271,7 +296,11 @@ class OpenTelemetrySource(object): ) ) - processor = BatchSpanProcessor(OTLPSpanExporter()) + processor = None + if otel_exporter_otlp_traces_protocol == 'grpc': + processor = BatchSpanProcessor(GRPCOTLPSpanExporter()) + else: + processor = BatchSpanProcessor(HTTPOTLPSpanExporter()) trace.get_tracer_provider().add_span_processor(processor) @@ -462,6 +491,7 @@ class CallbackModule(CallbackBase): self.errors = 0 
self.disabled = False self.traceparent = False + self.otel_exporter_otlp_traces_protocol = None if OTEL_LIBRARY_IMPORT_ERROR: raise_from( @@ -497,6 +527,8 @@ class CallbackModule(CallbackBase): # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 self.traceparent = self.get_option('traceparent') + self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -585,7 +617,8 @@ class CallbackModule(CallbackBase): status, self.traceparent, self.disable_logs, - self.disable_attributes_in_logs + self.disable_attributes_in_logs, + self.otel_exporter_otlp_traces_protocol, ) def v2_runner_on_async_failed(self, result, **kwargs): From 1ac94b5f446de5ec7c5563132c22dfd0c50526d6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 16 May 2024 04:51:42 +1200 Subject: [PATCH 077/482] Initial commit for django modutils and django_command module (#8349) * Initial commit for django modutils and django_command module * Fixed sanity, param settings required * add stdout, stderr and cmd to django module output * add examples and return docs to djando_command module * multiple minor adjustments * fix typo * Update plugins/modules/django_command.py Co-authored-by: Felix Fontein * Update plugins/modules/django_command.py Co-authored-by: Felix Fontein * Update plugins/modules/django_command.py Co-authored-by: Felix Fontein * move note to seealso * add xfailing test * Update plugins/doc_fragments/django.py Co-authored-by: Felix Fontein * Update plugins/doc_fragments/django.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 6 ++ plugins/doc_fragments/django.py | 53 ++++++++++++ plugins/module_utils/django.py | 84 +++++++++++++++++++ plugins/modules/django_command.py | 83 ++++++++++++++++++ tests/unit/plugins/modules/helper.py | 4 +- 
.../plugins/modules/test_django_command.py | 13 +++ .../plugins/modules/test_django_command.yaml | 40 +++++++++ 7 files changed, 281 insertions(+), 2 deletions(-) create mode 100644 plugins/doc_fragments/django.py create mode 100644 plugins/module_utils/django.py create mode 100644 plugins/modules/django_command.py create mode 100644 tests/unit/plugins/modules/test_django_command.py create mode 100644 tests/unit/plugins/modules/test_django_command.yaml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 60d68a2833..5e674628f4 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -119,6 +119,8 @@ files: maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments + $doc_fragments/django.py: + maintainers: russoz $doc_fragments/hpe3par.py: labels: hpe3par maintainers: farhan7500 gautamphegde @@ -300,6 +302,8 @@ files: maintainers: russoz $module_utils/deps.py: maintainers: russoz + $module_utils/django.py: + maintainers: russoz $module_utils/gconftool2.py: labels: gconftool2 maintainers: russoz @@ -500,6 +504,8 @@ files: ignore: scottanderson42 tastychutney labels: django_manage maintainers: russoz + $modules/django_command.py: + maintainers: russoz $modules/dnf_versionlock.py: maintainers: moreda $modules/dnf_config_manager.py: diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py new file mode 100644 index 0000000000..d92799937d --- /dev/null +++ b/plugins/doc_fragments/django.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + DOCUMENTATION = r''' +options: + venv: + description: + - Use the the Python interpreter from this virtual environment. 
+ - Pass the path to the root of the virtualenv, not the C(bin/) directory nor the C(python) executable. + type: path + settings: + description: + - Specifies the settings module to use. + - The value will be passed as is to the C(--settings) argument in C(django-admin). + type: str + required: true + pythonpath: + description: + - Adds the given filesystem path to the Python import search path. + - The value will be passed as is to the C(--pythonpath) argument in C(django-admin). + type: path + traceback: + description: + - Provides a full stack trace in the output when a C(CommandError) is raised. + type: bool + verbosity: + description: + - Specifies the amount of notification and debug information in the output of C(django-admin). + type: int + choices: [0, 1, 2, 3] + skip_checks: + description: + - Skips running system checks prior to running the command. + type: bool + + +notes: + - The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed. + +seealso: + - name: django-admin and manage.py in official Django documentation + description: >- + Refer to this documentation for the builtin commands and options of C(django-admin). + Please make sure that you select the right version of Django in the version selector on that page. 
+ link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ +''' diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py new file mode 100644 index 0000000000..b93dabbd2c --- /dev/null +++ b/plugins/module_utils/django.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +django_std_args = dict( + # environmental options + venv=dict(type="path"), + # default options of django-admin + settings=dict(type="str", required=True), + pythonpath=dict(type="path"), + traceback=dict(type="bool"), + verbosity=dict(type="int", choices=[0, 1, 2, 3]), + skip_checks=dict(type="bool"), +) + +_django_std_arg_fmts = dict( + command=cmd_runner_fmt.as_list(), + settings=cmd_runner_fmt.as_opt_eq_val("--settings"), + pythonpath=cmd_runner_fmt.as_opt_eq_val("--pythonpath"), + traceback=cmd_runner_fmt.as_bool("--traceback"), + verbosity=cmd_runner_fmt.as_opt_val("--verbosity"), + no_color=cmd_runner_fmt.as_fixed("--no-color"), + skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), +) + + +class _DjangoRunner(PythonRunner): + def __init__(self, module, arg_formats=None, **kwargs): + arg_fmts = dict(arg_formats) if arg_formats else {} + arg_fmts.update(_django_std_arg_fmts) + + super(_DjangoRunner, self).__init__(module, ["-m", "django"], arg_formats=arg_fmts, **kwargs) + + def __call__(self, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, 
**kwargs): + args_order = ( + ("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order) + ) + return super(_DjangoRunner, self).__call__(args_order, output_process, ignore_value_none, check_mode_skip, check_mode_return, **kwargs) + + +class DjangoModuleHelper(ModuleHelper): + module = {} + use_old_vardict = False + django_admin_cmd = None + arg_formats = {} + django_admin_arg_order = () + + def __init__(self): + argument_spec = dict(django_std_args) + argument_spec.update(self.module.get("argument_spec", {})) + self.module["argument_spec"] = argument_spec + super(DjangoModuleHelper, self).__init__(self.module) + if self.django_admin_cmd is not None: + self.vars.command = self.django_admin_cmd + + def __run__(self): + runner = _DjangoRunner(self.module, + default_args_order=self.django_admin_arg_order, + arg_formats=self.arg_formats, + venv=self.vars.venv, + check_rc=True) + with runner() as ctx: + results = ctx.run() + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + if self.verbosity >= 3: + self.vars.run_info = ctx.run_info + + return results + + @classmethod + def execute(cls): + cls().run() diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py new file mode 100644 index 0000000000..788f4a100e --- /dev/null +++ b/plugins/modules/django_command.py @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: django_command +author: + - Alexei Znamensky (@russoz) +short_description: Run Django admin commands +version_added: 9.0.0 +description: + - This module allows the execution of 
arbitrary Django admin commands. +extends_documentation_fragment: + - community.general.attributes + - community.general.django +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. + type: str + required: true + extra_args: + type: list + elements: str + description: + - List of extra arguments passed to the django admin command. +""" + +EXAMPLES = """ +- name: Check the project + community.general.django_command: + command: check + settings: myproject.settings + +- name: Check the project in specified python path, using virtual environment + community.general.django_command: + command: check + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = """ +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCommand(DjangoModuleHelper): + module = dict( + argument_spec=dict( + command=dict(type="str", required=True), + extra_args=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + arg_formats = dict( + extra_args=cmd_runner_fmt.as_list(), + ) + django_admin_arg_order = "extra_args" + + +def main(): + DjangoCommand.execute() + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/helper.py index a7322bf4d8..1ffa19aad4 100644 --- a/tests/unit/plugins/modules/helper.py +++ b/tests/unit/plugins/modules/helper.py @@ -52,9 +52,9 @@ class _BaseContext(object): test_flags = self.test_flags() if test_flags.get("skip"): - pytest.skip() + pytest.skip(test_flags.get("skip")) if 
test_flags.get("xfail"): - pytest.xfail() + pytest.xfail(test_flags.get("xfail")) func() diff --git a/tests/unit/plugins/modules/test_django_command.py b/tests/unit/plugins/modules/test_django_command.py new file mode 100644 index 0000000000..ffa9feb394 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_command.py @@ -0,0 +1,13 @@ +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import django_command +from .helper import Helper + + +Helper.from_module(django_command, __name__) diff --git a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml new file mode 100644 index 0000000000..9fe9b419f9 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: command_success + input: + command: check + extra_args: + - babaloo + - yaba + - daba + - doo + settings: whatever.settings + run_command_calls: + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "whatever\n" + err: "" +- id: command_fail + input: + command: check + extra_args: + - babaloo + - yaba + - daba + - doo + settings: whatever.settings + output: + failed: true + flags: + xfail: not seem to be failing as it should + run_command_calls: + - command: [/testbin/python, -m, django, check, --no-color, 
--settings=whatever.settings, babaloo, yaba, daba, doo] + environ: *env-def + rc: 1 + out: "whatever\n" + err: "" From cd01a928abad919302e51974fc0c96ececf8ddc0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 15 May 2024 20:49:57 +0200 Subject: [PATCH 078/482] Add REUSE badge (#8365) Add REUSE badge. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index dd1a50b0ec..b5a6fcfa24 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,7 @@ SPDX-License-Identifier: GPL-3.0-or-later [![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) [![EOL CI](https://github.com/ansible-collections/community.general/workflows/EOL%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) +[![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. 
From bb73f28bf51888671fffea4b6f92d9e2eec61b75 Mon Sep 17 00:00:00 2001 From: kurokobo Date: Sat, 18 May 2024 22:41:34 +0900 Subject: [PATCH 079/482] feat: implement timestamp callback plugin to show simple timestamp for each header (#8308) * feat: add community.general.timestamp callback plugin * feat: add minimal integration tests for timestamp callback plugin * feat: add maintainers for timestamp callback plugin * fix: correct license * fix: remove type annotation for the older python environment * fix: remove unnecessary comment Co-authored-by: Felix Fontein * fix: add trailing period Co-authored-by: Felix Fontein * fix: split long description into list Co-authored-by: Felix Fontein * fix: remove default and add type Co-authored-by: Felix Fontein * fix; add type Co-authored-by: Felix Fontein * fix: split long description into list Co-authored-by: Felix Fontein * fix: improve description for format_string to describe usable format codes * fix: clarify the original codes and add copyright from that * fix: shorten long lines * fix: correct link format * fix: add seealso section * fix: add ignore entries for EOL CI * fix: update seealso to correctly associate with related plugin Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/callback/timestamp.py | 127 ++++++++++++++++++ .../targets/callback_timestamp/aliases | 6 + .../targets/callback_timestamp/tasks/main.yml | 66 +++++++++ tests/sanity/ignore-2.13.txt | 1 + tests/sanity/ignore-2.14.txt | 1 + 6 files changed, 203 insertions(+) create mode 100644 plugins/callback/timestamp.py create mode 100644 tests/integration/targets/callback_timestamp/aliases create mode 100644 tests/integration/targets/callback_timestamp/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 5e674628f4..add3249355 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -91,6 +91,8 @@ files: maintainers: ryancurrah $callbacks/syslog_json.py: maintainers: 
imjoseangel + $callbacks/timestamp.py: + maintainers: kurokobo $callbacks/unixy.py: labels: unixy maintainers: akatch diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py new file mode 100644 index 0000000000..07cd8d239c --- /dev/null +++ b/plugins/callback/timestamp.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, kurokobo +# Copyright (c) 2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" + name: timestamp + type: stdout + short_description: Adds simple timestamp for each header + version_added: 9.0.0 + description: + - This callback adds simple timestamp for each header. + author: kurokobo (@kurokobo) + options: + timezone: + description: + - Timezone to use for the timestamp in IANA time zone format. + - For example C(America/New_York), C(Asia/Tokyo)). Ignored on Python < 3.9. + ini: + - section: callback_timestamp + key: timezone + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE + type: string + format_string: + description: + - Format of the timestamp shown to user in 1989 C standard format. + - > + Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) + for the available format codes. + ini: + - section: callback_timestamp + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING + default: "%H:%M:%S" + type: string + seealso: + - plugin: ansible.posix.profile_tasks + plugin_type: callback + description: > + You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time + with detailed timestamps. 
+ extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + + +from ansible.plugins.callback.default import CallbackModule as Default +from ansible.utils.display import get_text_width +from ansible.module_utils.common.text.converters import to_text +from datetime import datetime +import types +import sys + +# Store whether the zoneinfo module is available +_ZONEINFO_AVAILABLE = sys.version_info >= (3, 9) + + +def get_datetime_now(tz): + """ + Returns the current timestamp with the specified timezone + """ + return datetime.now(tz=tz) + + +def banner(self, msg, color=None, cows=True): + """ + Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum) with trailing timestamp + + Based on the banner method of Display class from ansible.utils.display + + https://github.com/ansible/ansible/blob/4403519afe89138042108e237aef317fd5f09c33/lib/ansible/utils/display.py#L511 + """ + timestamp = get_datetime_now(self.timestamp_tzinfo).strftime(self.timestamp_format_string) + timestamp_len = get_text_width(timestamp) + 1 # +1 for leading space + + msg = to_text(msg) + if self.b_cowsay and cows: + try: + self.banner_cowsay("%s @ %s" % (msg, timestamp)) + return + except OSError: + self.warning("somebody cleverly deleted cowsay or something during the PB run. 
heh.") + + msg = msg.strip() + try: + star_len = self.columns - get_text_width(msg) - timestamp_len + except EnvironmentError: + star_len = self.columns - len(msg) - timestamp_len + if star_len <= 3: + star_len = 3 + stars = "*" * star_len + self.display("\n%s %s %s" % (msg, stars, timestamp), color=color) + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.timestamp" + + def __init__(self): + super(CallbackModule, self).__init__() + + # Replace the banner method of the display object with the custom one + self._display.banner = types.MethodType(banner, self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + # Store zoneinfo for specified timezone if available + tzinfo = None + if _ZONEINFO_AVAILABLE and self.get_option("timezone"): + from zoneinfo import ZoneInfo + + tzinfo = ZoneInfo(self.get_option("timezone")) + + # Inject options into the display object + setattr(self._display, "timestamp_tzinfo", tzinfo) + setattr(self._display, "timestamp_format_string", self.get_option("format_string")) diff --git a/tests/integration/targets/callback_timestamp/aliases b/tests/integration/targets/callback_timestamp/aliases new file mode 100644 index 0000000000..124adcfb8c --- /dev/null +++ b/tests/integration/targets/callback_timestamp/aliases @@ -0,0 +1,6 @@ +# Copyright (c) 2024, kurokobo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or ) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 +needs/target/callback diff --git a/tests/integration/targets/callback_timestamp/tasks/main.yml b/tests/integration/targets/callback_timestamp/tasks/main.yml new file mode 100644 index 0000000000..5e0acc15f0 --- /dev/null +++ b/tests/integration/targets/callback_timestamp/tasks/main.yml @@ -0,0 +1,66 @@ +--- 
+#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) 2024, kurokobo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Run tests + include_role: + name: callback + vars: + tests: + - name: Enable timestamp in the default length + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_STDOUT_CALLBACK: community.general.timestamp + ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING: "15:04:05" + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task name + debug: + msg: sample debug msg + expected_output: [ + "", + "PLAY [testhost] ******************************************************* 15:04:05", + "", + "TASK [Sample task name] *********************************************** 15:04:05", + "ok: [testhost] => {", + " \"msg\": \"sample debug msg\"", + "}", + "", + "PLAY RECAP ************************************************************ 15:04:05", + "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " + ] + + - name: Enable timestamp in the longer length + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_STDOUT_CALLBACK: community.general.timestamp + ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING: "2006-01-02T15:04:05" + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task name + debug: + msg: sample debug msg + expected_output: [ + "", + "PLAY [testhost] ******************************************** 2006-01-02T15:04:05", + "", + "TASK [Sample task name] ************************************ 2006-01-02T15:04:05", + "ok: [testhost] => {", + " \"msg\": \"sample debug msg\"", + "}", + "", + 
"PLAY RECAP ************************************************* 2006-01-02T15:04:05", + "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " + ] diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index 954a8afebf..cfeaff7c31 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,4 +1,5 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen +plugins/callback/timestamp.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 01b195e9f5..247d43fe37 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,4 +1,5 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen +plugins/callback/timestamp.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice From a409f8fc2fcfcc36a942f6b50b0e225c4d2db74c Mon Sep 17 00:00:00 2001 From: Gilbert Gilb's Date: Sat, 18 May 2024 15:42:11 +0200 Subject: [PATCH 080/482] feat(gandi_livedns): support personal access tokens (#8337) * fix(gandi_livedns): fix unsafe conditionals in tests * feat(gandi_livedns): support personal access tokens Fixes #7639 --- ...90-gandi-livedns-personal-access-token.yml | 2 + plugins/module_utils/gandi_livedns_api.py | 8 +++- plugins/modules/gandi_livedns.py | 37 ++++++++++++++++--- .../gandi_livedns/tasks/create_record.yml | 25 +++++++++---- .../gandi_livedns/tasks/update_record.yml | 24 ++++++------ 5 files changed, 69 insertions(+), 27 deletions(-) create mode 100644 changelogs/fragments/8290-gandi-livedns-personal-access-token.yml diff 
--git a/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml b/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml new file mode 100644 index 0000000000..3168bf20fd --- /dev/null +++ b/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml @@ -0,0 +1,2 @@ +minor_changes: + - gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, https://github.com/ansible-collections/community.general/pull/8337). diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 53245d44d0..824fea46e7 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -33,6 +33,7 @@ class GandiLiveDNSAPI(object): def __init__(self, module): self.module = module self.api_key = module.params['api_key'] + self.personal_access_token = module.params['personal_access_token'] def _build_error_message(self, module, info): s = '' @@ -50,7 +51,12 @@ class GandiLiveDNSAPI(object): return s def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): - headers = {'Authorization': 'Apikey {0}'.format(self.api_key), + authorization_header = ( + 'Bearer {0}'.format(self.personal_access_token) + if self.personal_access_token + else 'Apikey {0}'.format(self.api_key) + ) + headers = {'Authorization': authorization_header, 'Content-Type': 'application/json'} data = None if payload: diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py index fdb7993a5e..ad2e96fd15 100644 --- a/plugins/modules/gandi_livedns.py +++ b/plugins/modules/gandi_livedns.py @@ -25,11 +25,19 @@ attributes: diff_mode: support: none options: + personal_access_token: + description: + - Scoped API token. + - One of O(personal_access_token) and O(api_key) must be specified. + type: str + version_added: 9.0.0 api_key: description: - Account API token. 
+ - Note that these type of keys are deprecated and might stop working at some point. + Use personal access tokens instead. + - One of O(personal_access_token) and O(api_key) must be specified. type: str - required: true record: description: - Record to add. @@ -73,7 +81,7 @@ EXAMPLES = r''' values: - 127.0.0.1 ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken register: record - name: Create a mail CNAME record to www.my.com domain @@ -84,7 +92,7 @@ EXAMPLES = r''' values: - www ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Change its TTL @@ -95,7 +103,7 @@ EXAMPLES = r''' values: - www ttl: 10800 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Delete the record @@ -103,8 +111,18 @@ EXAMPLES = r''' domain: my.com type: CNAME record: mail - api_key: dummyapitoken + personal_access_token: dummytoken state: absent + +- name: Use a (deprecated) API Key + community.general.gandi_livedns: + domain: my.com + record: test + type: A + values: + - 127.0.0.1 + ttl: 7200 + api_key: dummyapikey ''' RETURN = r''' @@ -151,7 +169,8 @@ from ansible_collections.community.general.plugins.module_utils.gandi_livedns_ap def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), + api_key=dict(type='str', no_log=True), + personal_access_token=dict(type='str', no_log=True), record=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), ttl=dict(type='int'), @@ -163,6 +182,12 @@ def main(): required_if=[ ('state', 'present', ['values', 'ttl']), ], + mutually_exclusive=[ + ('api_key', 'personal_access_token'), + ], + required_one_of=[ + ('api_key', 'personal_access_token'), + ], ) gandi_api = GandiLiveDNSAPI(module) diff --git a/tests/integration/targets/gandi_livedns/tasks/create_record.yml b/tests/integration/targets/gandi_livedns/tasks/create_record.yml index c3f1c17981..87056aa865 
100644 --- a/tests/integration/targets/gandi_livedns/tasks/create_record.yml +++ b/tests/integration/targets/gandi_livedns/tasks/create_record.yml @@ -45,10 +45,10 @@ assert: that: - result is changed - - result.record['values'] == {{ item['values'] }} - - result.record.record == "{{ item.record }}" - - result.record.type == "{{ item.type }}" - - result.record.ttl == {{ item.ttl }} + - result.record['values'] == item['values'] + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == item.ttl - name: test create a dns record idempotence community.general.gandi_livedns: @@ -63,7 +63,16 @@ assert: that: - result is not changed - - result.record['values'] == {{ item['values'] }} - - result.record.record == "{{ item.record }}" - - result.record.type == "{{ item.type }}" - - result.record.ttl == {{ item.ttl }} + - result.record['values'] == item['values'] + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == item.ttl + +- name: test create a DNS record with personal access token + community.general.gandi_livedns: + personal_access_token: "{{ gandi_personal_access_token }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item['values'] }}" + ttl: "{{ item.ttl }}" + type: "{{ item.type }}" diff --git a/tests/integration/targets/gandi_livedns/tasks/update_record.yml b/tests/integration/targets/gandi_livedns/tasks/update_record.yml index a080560a75..5f19bfa244 100644 --- a/tests/integration/targets/gandi_livedns/tasks/update_record.yml +++ b/tests/integration/targets/gandi_livedns/tasks/update_record.yml @@ -17,10 +17,10 @@ assert: that: - result is changed - - result.record['values'] == {{ item.update_values | default(item['values']) }} - - result.record.record == "{{ item.record }}" - - result.record.type == "{{ item.type }}" - - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} + - result.record['values'] == (item.update_values | 
default(item['values'])) + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == (item.update_ttl | default(item.ttl)) - name: test update or add another dns record community.general.gandi_livedns: @@ -35,10 +35,10 @@ assert: that: - result is changed - - result.record['values'] == {{ item.update_values | default(item['values']) }} - - result.record.record == "{{ item.record }}" - - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} - - result.record.type == "{{ item.type }}" + - result.record['values'] == (item.update_values | default(item['values'])) + - result.record.record == item.record + - result.record.ttl == (item.update_ttl | default(item.ttl)) + - result.record.type == item.type - name: test update or add another dns record idempotence community.general.gandi_livedns: @@ -53,7 +53,7 @@ assert: that: - result is not changed - - result.record['values'] == {{ item.update_values | default(item['values']) }} - - result.record.record == "{{ item.record }}" - - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} - - result.record.type == "{{ item.type }}" + - result.record['values'] == (item.update_values | default(item['values'])) + - result.record.record == item.record + - result.record.ttl == (item.update_ttl | default(item.ttl)) + - result.record.type == item.type From da8b133a7325238de71b363fa906465bc42c9743 Mon Sep 17 00:00:00 2001 From: Jay Hendren <3742215+jayhendren@users.noreply.github.com> Date: Sat, 18 May 2024 08:13:59 -0600 Subject: [PATCH 081/482] add clarifying notes to scope description (#8377) --- plugins/modules/ldap_search.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py index 45744e634a..7958f86e0b 100644 --- a/plugins/modules/ldap_search.py +++ b/plugins/modules/ldap_search.py @@ -44,6 +44,8 @@ options: type: str description: - The LDAP scope to use. 
+ - V(subordinate) requires the LDAPv3 subordinate feature extension. + - V(children) is equivalent to a "subtree" scope. filter: default: '(objectClass=*)' type: str From 0350a631de607c9067fb85177409ac81fb9f6a45 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 18 May 2024 16:35:31 +0200 Subject: [PATCH 082/482] Prepare 9.0.0 release. --- changelogs/fragments/9.0.0.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelogs/fragments/9.0.0.yml diff --git a/changelogs/fragments/9.0.0.yml b/changelogs/fragments/9.0.0.yml new file mode 100644 index 0000000000..8de366f74c --- /dev/null +++ b/changelogs/fragments/9.0.0.yml @@ -0,0 +1 @@ +release_summary: This is release 9.0.0 of `community.general`, released on 2024-05-20. From 70c78c1d71fe7c3795d98df039b9c4b15a6bfa35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20GATELLIER?= <26511053+lgatellier@users.noreply.github.com> Date: Sun, 19 May 2024 20:47:54 +0200 Subject: [PATCH 083/482] gitlab modules: deprecate basic auth method (#8383) --- changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml | 2 ++ plugins/module_utils/gitlab.py | 5 +++++ 2 files changed, 7 insertions(+) create mode 100644 changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml diff --git a/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml b/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml new file mode 100644 index 0000000000..b9c35cd0e4 --- /dev/null +++ b/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml @@ -0,0 +1,2 @@ +deprecated_features: + - "gitlab modules - the basic auth method on GitLab API have been deprecated and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383)." 
diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index b1354d8a9d..224789a71e 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -115,6 +115,11 @@ def gitlab_authentication(module, min_version=None): # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0 # This condition allow to still support older version of the python-gitlab library if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"): + module.deprecate( + "GitLab basic auth is deprecated and will be removed in next major version, " + "using another auth method (API token or OAuth) is strongly recommended.", + version='10.0.0', + collection_name='community.general') gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, email=gitlab_user, password=gitlab_password, private_token=gitlab_token, api_version=4) else: From 5f481939d4558e1ae61459cee2a4aecb2f8a7207 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Sun, 19 May 2024 20:48:49 +0200 Subject: [PATCH 084/482] feat(opentelemetry): support flag to export spans in a given file (#8363) * opentelemetry: support flag to create output file this is only to help with adding unit tests * refactor and rename * changelog * rename * fix linting --- .../8363-opentelemetry-export-to-a-file.yml | 2 + plugins/callback/opentelemetry.py | 48 +++++++++++++++---- 2 files changed, 42 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8363-opentelemetry-export-to-a-file.yml diff --git a/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml b/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml new file mode 100644 index 0000000000..b62521ec9f --- /dev/null +++ b/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml @@ -0,0 +1,2 @@ +minor_changes: + - opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` (https://github.com/ansible-collections/community.general/issues/7888, 
https://github.com/ansible-collections/community.general/pull/8363). diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index c3437b7306..cb3d752686 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -84,6 +84,17 @@ DOCUMENTATION = ''' - section: callback_opentelemetry key: disable_attributes_in_logs version_added: 7.1.0 + store_spans_in_file: + default: None + type: str + description: + - It stores the exported spans in the given file + env: + - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE + ini: + - section: callback_opentelemetry + key: store_spans_in_file + version_added: 9.0.0 otel_exporter_otlp_traces_protocol: type: str description: @@ -123,6 +134,7 @@ examples: | ''' import getpass +import json import os import socket import sys @@ -147,9 +159,12 @@ try: from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor + BatchSpanProcessor, + SimpleSpanProcessor + ) + from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter ) - # Support for opentelemetry-api <= 1.12 try: from opentelemetry.util._time import _time_ns @@ -280,7 +295,8 @@ class OpenTelemetrySource(object): traceparent, disable_logs, disable_attributes_in_logs, - otel_exporter_otlp_traces_protocol): + otel_exporter_otlp_traces_protocol, + store_spans_in_file): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -296,11 +312,16 @@ class OpenTelemetrySource(object): ) ) - processor = None - if otel_exporter_otlp_traces_protocol == 'grpc': - processor = BatchSpanProcessor(GRPCOTLPSpanExporter()) + otel_exporter = None + if store_spans_in_file: + otel_exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(otel_exporter) else: - processor = BatchSpanProcessor(HTTPOTLPSpanExporter()) + if 
otel_exporter_otlp_traces_protocol == 'grpc': + otel_exporter = GRPCOTLPSpanExporter() + else: + otel_exporter = HTTPOTLPSpanExporter() + processor = BatchSpanProcessor(otel_exporter) trace.get_tracer_provider().add_span_processor(processor) @@ -322,6 +343,8 @@ class OpenTelemetrySource(object): with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs) + return otel_exporter + def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): """ update the span with the given TaskData and HostData """ @@ -491,6 +514,7 @@ class CallbackModule(CallbackBase): self.errors = 0 self.disabled = False self.traceparent = False + self.store_spans_in_file = False self.otel_exporter_otlp_traces_protocol = None if OTEL_LIBRARY_IMPORT_ERROR: @@ -519,6 +543,8 @@ class CallbackModule(CallbackBase): self.disable_logs = self.get_option('disable_logs') + self.store_spans_in_file = self.get_option('store_spans_in_file') + self.otel_service_name = self.get_option('otel_service_name') if not self.otel_service_name: @@ -610,7 +636,7 @@ class CallbackModule(CallbackBase): status = Status(status_code=StatusCode.OK) else: status = Status(status_code=StatusCode.ERROR) - self.opentelemetry.generate_distributed_traces( + otel_exporter = self.opentelemetry.generate_distributed_traces( self.otel_service_name, self.ansible_playbook, self.tasks_data, @@ -619,7 +645,13 @@ class CallbackModule(CallbackBase): self.disable_logs, self.disable_attributes_in_logs, self.otel_exporter_otlp_traces_protocol, + self.store_spans_in_file ) + if self.store_spans_in_file: + spans = [json.loads(span.to_json()) for span in otel_exporter.get_finished_spans()] + with open(self.store_spans_in_file, "w", encoding="utf-8") as output: + json.dump({"spans": spans}, output, indent=4) + def v2_runner_on_async_failed(self, result, **kwargs): self.errors += 1 From 
d87b9fe0dce577aad2e58f56626a5df50839a884 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Sun, 19 May 2024 20:49:10 +0200 Subject: [PATCH 085/482] fix(opentelemetry): avoid storing inmemory if logs are disabled (#8373) * fix(opentelemetry): avoid storing inmemory if logs are disabled * changelog * fix syntax * refactor * chore * chore * chore * fix --- changelogs/fragments/8373-honour-disable-logs.yaml | 3 +++ plugins/callback/opentelemetry.py | 12 +++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8373-honour-disable-logs.yaml diff --git a/changelogs/fragments/8373-honour-disable-logs.yaml b/changelogs/fragments/8373-honour-disable-logs.yaml new file mode 100644 index 0000000000..112b10a9f4 --- /dev/null +++ b/changelogs/fragments/8373-honour-disable-logs.yaml @@ -0,0 +1,3 @@ +bugfixes: + - opentelemetry callback plugin - honour the ``disable_logs`` option to avoid storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373). 
+ diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index cb3d752686..54c1690a22 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -555,6 +555,12 @@ class CallbackModule(CallbackBase): self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + def dump_results(self, result): + """ dump the results if disable_logs is not enabled """ + if self.disable_logs: + return "" + return self._dump_results(result._result) + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -604,7 +610,7 @@ class CallbackModule(CallbackBase): self.tasks_data, status, result, - self._dump_results(result._result) + self.dump_results(result) ) def v2_runner_on_ok(self, result): @@ -612,7 +618,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'ok', result, - self._dump_results(result._result) + self.dump_results(result) ) def v2_runner_on_skipped(self, result): @@ -620,7 +626,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'skipped', result, - self._dump_results(result._result) + self.dump_results(result) ) def v2_playbook_on_include(self, included_file): From 03240ad7dc57aefc14b1ceba3e08abec0c62500b Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Sun, 19 May 2024 20:50:03 +0200 Subject: [PATCH 086/482] fix(opentelemetry): close span even if no logs are sent (#8367) * fix(opentelemetry): close span even if no logs are sent * changelog --- changelogs/fragments/8367-fix-close-span-if-no-logs.yaml | 2 ++ plugins/callback/opentelemetry.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8367-fix-close-span-if-no-logs.yaml diff --git a/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml b/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml new file mode 100644 index 0000000000..e0a90be311 --- /dev/null +++ b/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml @@ 
-0,0 +1,2 @@ +bugfixes: + - "opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367)." diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 54c1690a22..58cfa057b7 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -402,7 +402,8 @@ class OpenTelemetrySource(object): if not disable_logs: # This will avoid populating span attributes to the logs span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes) - span.end(end_time=host_data.finish) + # Close span always + span.end(end_time=host_data.finish) def set_span_attributes(self, span, attributes): """ update the span attributes with the given attributes if not None """ From f82e7a7b83099f2f1fda9a4dfb81586732577bbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sun, 19 May 2024 20:50:41 +0200 Subject: [PATCH 087/482] lint and fix polkit examples (#8381) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * lint and fix polkit examples Signed-off-by: Thomas Sjögren * add changelog fragment Signed-off-by: Thomas Sjögren * remove changelog fragment Signed-off-by: Thomas Sjögren --------- Signed-off-by: Thomas Sjögren --- plugins/become/machinectl.py | 13 +++++++------ plugins/become/run0.py | 12 ++++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index 9b9ac7ec51..e2773ed6a5 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -78,12 +78,13 @@ DOCUMENTATION = ''' EXAMPLES = r''' # A polkit rule needed to use the module with a non-root user. # See the Notes section for details. 
-60-machinectl-fast-user-auth.rules: | - polkit.addRule(function(action, subject) { - if(action.id == "org.freedesktop.machine1.host-shell" && subject.isInGroup("wheel")) { - return polkit.Result.AUTH_SELF_KEEP; - } - }); +/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: | + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.machine1.host-shell" && + subject.isInGroup("wheel")) { + return polkit.Result.AUTH_SELF_KEEP; + } + }); ''' from re import compile as re_compile diff --git a/plugins/become/run0.py b/plugins/become/run0.py index 1d6d7cb754..a718e86f24 100644 --- a/plugins/become/run0.py +++ b/plugins/become/run0.py @@ -69,13 +69,13 @@ EXAMPLES = r""" # An example polkit rule that allows the user 'ansible' in the 'wheel' group # to execute commands using run0 without authentication. /etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: | - polkit.addRule(function(action, subject) { - if(action.id == "org.freedesktop.systemd1.manage-units" && - subject.isInGroup("wheel") - subject.user == "ansible") { + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.systemd1.manage-units" && + subject.isInGroup("wheel") && + subject.user == "ansible") { return polkit.Result.YES; - } - }); + } + }); """ from re import compile as re_compile From bebe162a226cce4f43e8e5cb4714e793a401af4d Mon Sep 17 00:00:00 2001 From: Kevin Wise <41447823+Klistel@users.noreply.github.com> Date: Sun, 19 May 2024 23:17:08 -0700 Subject: [PATCH 088/482] Add toggle for verbose logging to pkg5.py (#8382) * Add toggle for verbose logging Updated params with 'verbose' mode (defaults to False, which is existing behavior) to allow users to toggle verbose to True, which disables the '-q' flag that was hardcoded in the original module * Create 8379-verbose-mode-pkg5.yml * update pkg5.py to conform to PEP8 length requirements The new verbosity argument on line 172 broke the 160 character length PEP8 requirement - split the line in two to conform 
to PEP8 * Add PR link to changelog fragement yml Co-authored-by: Felix Fontein * Add version_added and make the description of the verbose param clearer * Update pkg5.py verbose description to conform to ansible documentation for semantic markup Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8379-verbose-mode-pkg5.yml | 2 ++ plugins/modules/pkg5.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8379-verbose-mode-pkg5.yml diff --git a/changelogs/fragments/8379-verbose-mode-pkg5.yml b/changelogs/fragments/8379-verbose-mode-pkg5.yml new file mode 100644 index 0000000000..abc1c61dce --- /dev/null +++ b/changelogs/fragments/8379-verbose-mode-pkg5.yml @@ -0,0 +1,2 @@ +minor_changes: + - pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, https://github.com/ansible-collections/community.general/pull/8382). diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py index c4aace9f28..08fa9272f7 100644 --- a/plugins/modules/pkg5.py +++ b/plugins/modules/pkg5.py @@ -54,6 +54,12 @@ options: - Refresh publishers before execution. type: bool default: true + verbose: + description: + - Set to V(true) to disable quiet execution. 
+ type: bool + default: false + version_added: 9.0.0 ''' EXAMPLES = ''' - name: Install Vim @@ -90,6 +96,7 @@ def main(): accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), be_name=dict(type='str'), refresh=dict(type='bool', default=True), + verbose=dict(type='bool', default=False), ), supports_check_mode=True, ) @@ -156,9 +163,15 @@ def ensure(module, state, packages, params): else: no_refresh = ['--no-refresh'] + if params['verbose']: + verbosity = [] + else: + verbosity = ['-q'] + to_modify = list(filter(behaviour[state]['filter'], packages)) if to_modify: - rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify) + rc, out, err = module.run_command( + ['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + verbosity + ['--'] + to_modify) response['rc'] = rc response['results'].append(out) response['msg'] += err From 861f55eb049fcc0b20cc647d31d71a09a65f1818 Mon Sep 17 00:00:00 2001 From: rippleFCL Date: Mon, 20 May 2024 07:30:59 +0100 Subject: [PATCH 089/482] added usb support to proxmox module (#8199) * added usb support to proxmox module * forgot docs * fixed net refrances * changed usb to USB * added changelog fragment Co-authored-by: Felix Fontein * added changelog fragment * corrected spelling and punctuation Co-authored-by: Felix Fontein * Another one. 
--------- Co-authored-by: ripplefcl Co-authored-by: Felix Fontein --- .../8199-added-usb-support-to-proxmox-module.yml | 2 ++ plugins/modules/proxmox_kvm.py | 14 +++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml diff --git a/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml b/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml new file mode 100644 index 0000000000..b621fe284c --- /dev/null +++ b/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml @@ -0,0 +1,2 @@ +minor_changes: + - "proxmox_kvm - adds``usb`` parameter for setting USB devices on proxmox KVM VMs (https://github.com/ansible-collections/community.general/pull/8199)." diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 253a75d4b3..9fe805c7ab 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -519,6 +519,16 @@ options: default: '2.0' type: dict version_added: 7.1.0 + usb: + description: + - A hash/dictionary of USB devices for the VM. O(usb='{"key":"value", "key":"value"}'). + - Keys allowed are - C(usb[n]) where 0 ≤ n ≤ N. + - Values allowed are - C(host="value|spice",mapping="value",usb3="1|0"). + - host is either C(spice) or the USB id/port. + - Option C(mapping) is the mapped USB device name. + - Option C(usb3) enables USB 3 support. + type: dict + version_added: 9.0.0 update: description: - If V(true), the VM will be updated with new value. @@ -1094,7 +1104,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): ) # Convert all dict in kwargs to elements. 
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n], usb[n] for k in list(kwargs.keys()): if isinstance(kwargs[k], dict): kwargs.update(kwargs[k]) @@ -1311,6 +1321,7 @@ def main(): storage=dict(type='str', required=True), version=dict(type='str', choices=['2.0', '1.2'], default='2.0') )), + usb=dict(type='dict'), update=dict(type='bool', default=False), update_unsafe=dict(type='bool', default=False), vcpus=dict(type='int'), @@ -1516,6 +1527,7 @@ def main(): tdf=module.params['tdf'], template=module.params['template'], tpmstate0=module.params['tpmstate0'], + usb=module.params['usb'], vcpus=module.params['vcpus'], vga=module.params['vga'], virtio=module.params['virtio'], From 704a5acc631a0febc99fba64ec2fec392c9d0c2b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 20 May 2024 08:43:52 +0200 Subject: [PATCH 090/482] From now on automatically add period to new plugins in changelog, and use FQCNs (#8392) From now on automatically add period to new plugins in changelog, and use FQCNs. --- changelogs/config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 23afe36d29..2cef6e26f4 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -35,3 +35,6 @@ sections: - - known_issues - Known Issues title: Community General +trivial_section_name: trivial +use_fqcn: true +add_plugin_period: true From 9b0c9838603a0858a7cf3e3362a9c6c2f83566d3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 20 May 2024 09:31:23 +0200 Subject: [PATCH 091/482] Move 'docker' homebrew tests into homebrew tests (#8395) Move 'docker' homebrew tests into homebrew tests. 
--- .../integration/targets/docker/tasks/main.yml | 39 ------------------- .../{docker => homebrew}/handlers/main.yml | 0 .../targets/homebrew/tasks/casks.yml | 2 - .../targets/homebrew/tasks/docker.yml | 37 ++++++++++++++++++ .../targets/homebrew/tasks/formulae.yml | 2 - .../targets/homebrew/tasks/main.yml | 5 +-- 6 files changed, 39 insertions(+), 46 deletions(-) delete mode 100644 tests/integration/targets/docker/tasks/main.yml rename tests/integration/targets/{docker => homebrew}/handlers/main.yml (100%) create mode 100644 tests/integration/targets/homebrew/tasks/docker.yml diff --git a/tests/integration/targets/docker/tasks/main.yml b/tests/integration/targets/docker/tasks/main.yml deleted file mode 100644 index fd636247f4..0000000000 --- a/tests/integration/targets/docker/tasks/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- when: ansible_facts.distribution == 'MacOSX' - block: - - name: MACOS | Find brew binary - command: which brew - register: brew_which - - - name: MACOS | Get owner of brew binary - stat: - path: "{{ brew_which.stdout }}" - register: brew_stat - - - name: MACOS | Install docker without --formula - community.general.homebrew: - name: docker - state: present - become: true - become_user: "{{ brew_stat.stat.pw_name }}" - ignore_errors: true - register: result - - - name: Check that installing docker without --formula raises warning - assert: - that: - - result is failed - - - name: MACOS | Install docker - community.general.homebrew: - name: docker - state: present - force_formula: true - become: true - become_user: "{{ brew_stat.stat.pw_name }}" - notify: - - uninstall docker diff --git a/tests/integration/targets/docker/handlers/main.yml b/tests/integration/targets/homebrew/handlers/main.yml similarity index 100% rename from 
tests/integration/targets/docker/handlers/main.yml rename to tests/integration/targets/homebrew/handlers/main.yml diff --git a/tests/integration/targets/homebrew/tasks/casks.yml b/tests/integration/targets/homebrew/tasks/casks.yml index 42d3515bf2..ffbe67d158 100644 --- a/tests/integration/targets/homebrew/tasks/casks.yml +++ b/tests/integration/targets/homebrew/tasks/casks.yml @@ -12,13 +12,11 @@ - name: Find brew binary command: which brew register: brew_which - when: ansible_distribution in ['MacOSX'] - name: Get owner of brew binary stat: path: "{{ brew_which.stdout }}" register: brew_stat - when: ansible_distribution in ['MacOSX'] #- name: Use ignored-pinned option while upgrading all # homebrew: diff --git a/tests/integration/targets/homebrew/tasks/docker.yml b/tests/integration/targets/homebrew/tasks/docker.yml new file mode 100644 index 0000000000..3b9e2ea6b4 --- /dev/null +++ b/tests/integration/targets/homebrew/tasks/docker.yml @@ -0,0 +1,37 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: MACOS | Find brew binary + command: which brew + register: brew_which + +- name: MACOS | Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + +- name: MACOS | Install docker without --formula + community.general.homebrew: + name: docker + state: present + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + ignore_errors: true + register: result + +- name: Check that installing docker without --formula raises warning + assert: + that: + - result is failed + +- name: MACOS | Install docker + community.general.homebrew: + name: docker + state: present + force_formula: true + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + notify: + - uninstall docker diff --git a/tests/integration/targets/homebrew/tasks/formulae.yml 
b/tests/integration/targets/homebrew/tasks/formulae.yml index 1db3ef1a6a..1ca8d753e7 100644 --- a/tests/integration/targets/homebrew/tasks/formulae.yml +++ b/tests/integration/targets/homebrew/tasks/formulae.yml @@ -12,13 +12,11 @@ - name: Find brew binary command: which brew register: brew_which - when: ansible_distribution in ['MacOSX'] - name: Get owner of brew binary stat: path: "{{ brew_which.stdout }}" register: brew_stat - when: ansible_distribution in ['MacOSX'] #- name: Use ignored-pinned option while upgrading all # homebrew: diff --git a/tests/integration/targets/homebrew/tasks/main.yml b/tests/integration/targets/homebrew/tasks/main.yml index f5479917ea..00d0bcf31c 100644 --- a/tests/integration/targets/homebrew/tasks/main.yml +++ b/tests/integration/targets/homebrew/tasks/main.yml @@ -9,9 +9,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- block: - - include_tasks: 'formulae.yml' - - when: ansible_distribution in ['MacOSX'] block: + - include_tasks: 'formulae.yml' - include_tasks: 'casks.yml' + - include_tasks: 'docker.yml' From 22ed5048a290c8b46a2bab52881f65620944ebec Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 20 May 2024 12:35:31 +0200 Subject: [PATCH 092/482] Clean up main branch, next expected release will be 9.1.0. 
--- changelogs/changelog.yaml | 2 +- ...0-redhat_subscription-dbus-on-7.4-plus.yaml | 6 ------ .../5588-support-1password-connect.yml | 3 --- .../6572-nmcli-add-support-loopback-type.yml | 2 -- changelogs/fragments/7143-proxmox-template.yml | 3 --- ...z_permission-incorrect-resource-payload.yml | 2 -- ...199-gitlab-runner-new-creation-workflow.yml | 2 -- ...ulti-values-for-same-name-in-git-config.yml | 4 ---- ...issue-with-creating-a-wifi-bridge-slave.yml | 2 -- ...y_provider-mapper-reconfiguration-fixes.yml | 3 --- ...and-preserve-options-for-passwordstore.yaml | 2 -- .../fragments/7456-add-ssh-control-master.yml | 2 -- ...61-proxmox-inventory-add-exclude-nodes.yaml | 2 -- ...er-in-LXC-container-clone-of-ProxmoxVE.yaml | 2 -- ...4-fix-vm-removal-in-proxmox_pool_member.yml | 2 -- ...sh-firmware-update-message-id-hardening.yml | 2 -- .../7467-fix-gitlab-constants-calls.yml | 5 ----- .../7472-gitlab-add-ca-path-option.yml | 2 -- .../fragments/7485-proxmox_vm_info-config.yml | 2 -- .../7486-gitlab-refactor-package-check.yml | 2 -- .../fragments/7489-netcup-dns-record-types.yml | 2 -- .../7495-proxmox_disk-manipulate-cdrom.yml | 2 -- ...tting-on-bond-and-infiniband-interfaces.yml | 2 -- changelogs/fragments/7501-type.yml | 2 -- .../fragments/7505-ini_file-section_has.yml | 5 ----- changelogs/fragments/7506-pipx-pipargs.yml | 2 -- .../fragments/7517-elastic-close-client.yaml | 2 -- ...ne-string-handling-in-complex-variables.yml | 2 -- .../7538-add-krbprincipalattribute-option.yml | 2 -- .../fragments/7540-proxmox-update-config.yml | 2 -- .../fragments/7542-irc-logentries-ssl.yml | 3 --- .../7550-irc-use_tls-validate_certs.yml | 5 ----- ...64-onepassword-lookup-case-insensitive.yaml | 4 ---- .../7569-infiniband-slave-support.yml | 2 -- .../fragments/7577-fix-apt_rpm-module.yml | 2 -- changelogs/fragments/7578-irc-tls.yml | 4 ---- ...g-new-choice-passkey-to-ipauserauthtype.yml | 2 -- ...ices-idp-and-passkey-to-ipauserauthtype.yml | 2 -- 
.../fragments/7600-proxmox_kvm-hookscript.yml | 2 -- changelogs/fragments/7601-lvol-fix.yml | 2 -- .../fragments/7612-interface_file-method.yml | 2 -- ...redfish-info-add-boot-progress-property.yml | 2 -- ...x-keycloak-api-client-to-quote-properly.yml | 2 -- ...45-Keycloak-print-error-msg-from-server.yml | 2 -- .../7646-fix-order-number-detection-in-dn.yml | 2 -- .../fragments/7653-fix-cloudflare-lookup.yml | 2 -- changelogs/fragments/7676-lvol-pvs-as-list.yml | 2 -- .../7683-added-contenttype-parameter.yml | 2 -- ...oid-attempt-to-delete-non-existing-user.yml | 2 -- ...7698-improvements-to-keycloak_realm_key.yml | 4 ---- ...703-ssh_config_add_keys_to_agent_option.yml | 2 -- .../7704-ssh_config_identities_only_option.yml | 2 -- .../fragments/7717-prevent-modprobe-error.yml | 2 -- ...723-ipa-pwpolicy-update-pwpolicy-module.yml | 3 --- .../7737-add-ipa-dnsrecord-ns-type.yml | 2 -- ...40-add-message-id-header-to-mail-module.yml | 2 -- .../7746-raw_post-without-actions.yml | 2 -- .../fragments/7754-fixed-payload-format.yml | 2 -- changelogs/fragments/7765-mail-message-id.yml | 2 -- .../fragments/7782-cloudflare_dns-spf.yml | 2 -- ...ak-user-federation-custom-provider-type.yml | 2 -- .../7790-gitlab-runner-api-pagination.yml | 8 -------- ...state-template-will-check-status-first.yaml | 2 -- .../fragments/7797-ipa-fix-otp-idempotency.yml | 2 -- changelogs/fragments/7821-mssql_script-py2.yml | 2 -- .../7826-consul-modules-refactoring.yaml | 7 ------- .../7843-proxmox_kvm-update_unsafe.yml | 2 -- .../fragments/7847-gitlab-issue-title.yml | 2 -- .../7870-homebrew-cask-installed-detection.yml | 2 -- ...mox_fix-update-if-setting-doesnt-exist.yaml | 2 -- ...nventory_hostname_as_literal_in_remotes.yml | 2 -- ...-ipa-fix-sudo-and-hbcalrule-idempotence.yml | 3 --- .../7881-fix-keycloak-client-ckeckmode.yml | 2 -- ...-add-redfish-get-service-identification.yml | 2 -- .../fragments/7896-add-terraform-diff-mode.yml | 2 -- .../fragments/7897-consul-action-group.yaml | 2 -- 
.../fragments/7901-consul-acl-deprecation.yaml | 3 --- ...-add-redfish-set-service-identification.yml | 2 -- .../7919-onepassword-fieldname-casing.yaml | 2 -- .../7951-fix-redfish_info-exception.yml | 2 -- .../7953-proxmox_kvm-fix_status_check.yml | 2 -- ...es_events-option-to-gitlab_hook-module.yaml | 2 -- .../7963-fix-terraform-diff-absent.yml | 2 -- .../7970-fix-cargo-path-idempotency.yaml | 10 ---------- ...-add-mssql_script-transactional-support.yml | 2 -- .../7983-sudoers-add-support-noexec.yml | 2 -- .../fragments/7994-bitwarden-session-arg.yaml | 2 -- ...templating-support-to-icinga2-inventory.yml | 2 -- ...inventory-group_by_hostgroups-parameter.yml | 2 -- ...edfish-get-update-status-empty-response.yml | 2 -- ...13-bitwarden-full-collection-item-list.yaml | 2 -- .../8029-iptables-state-restore-check-mode.yml | 2 -- changelogs/fragments/8038-proxmox-startup.yml | 2 -- ...-module-error-reporting-on-become-true.yaml | 2 -- .../fragments/8057-pam_limits-check-mode.yml | 2 -- changelogs/fragments/8073-ldap-attrs-diff.yml | 2 -- ...75-optional-space-around-section-names.yaml | 2 -- ...8087-removed-redundant-unicode-prefixes.yml | 2 -- .../fragments/8091-consul-token-fixes.yaml | 2 -- ...100-haproxy-drain-fails-on-down-backend.yml | 2 -- ...-java_cert-enable-owner-group-mode-args.yml | 2 -- .../8118-fix-bond-slave-honoring-mtu.yml | 2 -- .../8126-filesystem-bcachefs-support.yaml | 2 -- ...or-message-for-linode-inventory-plugin.yaml | 3 --- .../fragments/8151-fix-lsvg_cmd-failed.yml | 2 -- .../8153-java_cert-add-cert_content-arg.yml | 2 -- .../8154-add-ovs-commands-to-nmcli-module.yml | 2 -- .../fragments/8158-gitlab-version-check.yml | 2 -- ...-redfish-implementing-reset-to-defaults.yml | 2 -- ...66-password-store-lookup-missing-subkey.yml | 2 -- changelogs/fragments/8169-lxml.yml | 2 -- .../fragments/8173-osx_defaults-check_type.yml | 2 -- .../fragments/8175-get_ipa_version_regex.yml | 2 -- changelogs/fragments/8183-from_ini_to_ini.yml | 3 --- 
.../8188-bitwarden-add-organization_id.yml | 2 -- ...4-redfish-add-multipart-to-capabilities.yml | 2 -- ...199-added-usb-support-to-proxmox-module.yml | 2 -- .../8211-riak-admin-sub-command-support.yml | 2 -- .../fragments/8215-add-docker-v2-protocol.yml | 2 -- changelogs/fragments/8222-datetime.yml | 3 --- ...cloak_client-additional-normalizations.yaml | 2 -- ...8224-keycloak_realm-add-normalizations.yaml | 2 -- changelogs/fragments/8225-unsafe.yml | 2 -- changelogs/fragments/8226-mh-vardict.yml | 10 ---------- .../fragments/8236-portage-select-feature.yml | 2 -- ...s-manager-rate-limit-retry-with-backoff.yml | 2 -- changelogs/fragments/8247-apt_rpm-latest.yml | 6 ------ ...-ssh-config-hostkey-support-accept-new.yaml | 2 -- .../fragments/8263-apt_rpm-install-check.yml | 2 -- changelogs/fragments/8264-run_command.yml | 14 -------------- .../fragments/8274-homebrew-force-formula.yml | 2 -- changelogs/fragments/8280-mh-deprecations.yml | 8 -------- .../fragments/8281-puppet-waitforlock.yaml | 2 -- .../fragments/8285-apt_rpm-state-deprecate.yml | 7 ------- .../8288-cmdrunner-fmt-list-len-limits.yml | 2 -- changelogs/fragments/8289-python-runner.yml | 2 -- ...290-gandi-livedns-personal-access-token.yml | 2 -- .../8303-fix-rendering-foreign-variables.yaml | 2 -- ...eration-fix-diff-krbPrincipalAttribute.yaml | 2 -- .../8321-fix-opentelemetry-callback.yml | 2 -- ...23-refactor-homebrew-logic-module-utils.yml | 2 -- .../fragments/8334-proxmox-action-group.yml | 2 -- .../fragments/8355-keycloak-idp-sanitize.yaml | 2 -- .../8363-opentelemetry-export-to-a-file.yml | 2 -- .../8367-fix-close-span-if-no-logs.yaml | 2 -- .../fragments/8373-honour-disable-logs.yaml | 3 --- .../fragments/8379-verbose-mode-pkg5.yml | 2 -- .../8383-deprecate-gitlab-basic-auth.yml | 2 -- changelogs/fragments/9.0.0.yml | 1 - .../fragments/add-ipa-sudorule-deny-cmd.yml | 2 -- .../fragments/aix_filesystem-crfs-issue.yml | 3 --- .../bitwarden-lookup-performance.yaml | 2 -- 
changelogs/fragments/hipchat.yml | 4 ---- changelogs/fragments/internal-redirects.yml | 5 ----- changelogs/fragments/inventory-rce.yml | 6 ------ ...nstance-not-found-avoid-false-positives.yml | 2 -- .../lxd-instances-api-endpoint-added.yml | 2 -- changelogs/fragments/pacemaker-cluster.yml | 3 --- changelogs/fragments/pkgin.yml | 2 -- changelogs/fragments/puppet_lang_force.yml | 3 --- changelogs/fragments/remove_deprecated.yml | 18 ------------------ galaxy.yml | 2 +- 162 files changed, 2 insertions(+), 435 deletions(-) delete mode 100644 changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml delete mode 100644 changelogs/fragments/5588-support-1password-connect.yml delete mode 100644 changelogs/fragments/6572-nmcli-add-support-loopback-type.yml delete mode 100644 changelogs/fragments/7143-proxmox-template.yml delete mode 100644 changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml delete mode 100644 changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml delete mode 100644 changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml delete mode 100644 changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml delete mode 100644 changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml delete mode 100644 changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml delete mode 100644 changelogs/fragments/7456-add-ssh-control-master.yml delete mode 100644 changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml delete mode 100644 changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml delete mode 100644 changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml delete mode 100644 changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml delete mode 100644 changelogs/fragments/7467-fix-gitlab-constants-calls.yml delete mode 100644 
changelogs/fragments/7472-gitlab-add-ca-path-option.yml delete mode 100644 changelogs/fragments/7485-proxmox_vm_info-config.yml delete mode 100644 changelogs/fragments/7486-gitlab-refactor-package-check.yml delete mode 100644 changelogs/fragments/7489-netcup-dns-record-types.yml delete mode 100644 changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml delete mode 100644 changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml delete mode 100644 changelogs/fragments/7501-type.yml delete mode 100644 changelogs/fragments/7505-ini_file-section_has.yml delete mode 100644 changelogs/fragments/7506-pipx-pipargs.yml delete mode 100644 changelogs/fragments/7517-elastic-close-client.yaml delete mode 100644 changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml delete mode 100644 changelogs/fragments/7538-add-krbprincipalattribute-option.yml delete mode 100644 changelogs/fragments/7540-proxmox-update-config.yml delete mode 100644 changelogs/fragments/7542-irc-logentries-ssl.yml delete mode 100644 changelogs/fragments/7550-irc-use_tls-validate_certs.yml delete mode 100644 changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml delete mode 100644 changelogs/fragments/7569-infiniband-slave-support.yml delete mode 100644 changelogs/fragments/7577-fix-apt_rpm-module.yml delete mode 100644 changelogs/fragments/7578-irc-tls.yml delete mode 100644 changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml delete mode 100644 changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml delete mode 100644 changelogs/fragments/7600-proxmox_kvm-hookscript.yml delete mode 100644 changelogs/fragments/7601-lvol-fix.yml delete mode 100644 changelogs/fragments/7612-interface_file-method.yml delete mode 100644 changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml delete mode 100644 changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml 
delete mode 100644 changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml delete mode 100644 changelogs/fragments/7646-fix-order-number-detection-in-dn.yml delete mode 100644 changelogs/fragments/7653-fix-cloudflare-lookup.yml delete mode 100644 changelogs/fragments/7676-lvol-pvs-as-list.yml delete mode 100644 changelogs/fragments/7683-added-contenttype-parameter.yml delete mode 100644 changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml delete mode 100644 changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml delete mode 100644 changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml delete mode 100644 changelogs/fragments/7704-ssh_config_identities_only_option.yml delete mode 100644 changelogs/fragments/7717-prevent-modprobe-error.yml delete mode 100644 changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml delete mode 100644 changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml delete mode 100644 changelogs/fragments/7740-add-message-id-header-to-mail-module.yml delete mode 100644 changelogs/fragments/7746-raw_post-without-actions.yml delete mode 100644 changelogs/fragments/7754-fixed-payload-format.yml delete mode 100644 changelogs/fragments/7765-mail-message-id.yml delete mode 100644 changelogs/fragments/7782-cloudflare_dns-spf.yml delete mode 100644 changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml delete mode 100644 changelogs/fragments/7790-gitlab-runner-api-pagination.yml delete mode 100644 changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml delete mode 100644 changelogs/fragments/7797-ipa-fix-otp-idempotency.yml delete mode 100644 changelogs/fragments/7821-mssql_script-py2.yml delete mode 100644 changelogs/fragments/7826-consul-modules-refactoring.yaml delete mode 100644 changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml delete mode 100644 changelogs/fragments/7847-gitlab-issue-title.yml delete mode 100644 
changelogs/fragments/7870-homebrew-cask-installed-detection.yml delete mode 100644 changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml delete mode 100644 changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml delete mode 100644 changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml delete mode 100644 changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml delete mode 100644 changelogs/fragments/7882-add-redfish-get-service-identification.yml delete mode 100644 changelogs/fragments/7896-add-terraform-diff-mode.yml delete mode 100644 changelogs/fragments/7897-consul-action-group.yaml delete mode 100644 changelogs/fragments/7901-consul-acl-deprecation.yaml delete mode 100644 changelogs/fragments/7916-add-redfish-set-service-identification.yml delete mode 100644 changelogs/fragments/7919-onepassword-fieldname-casing.yaml delete mode 100644 changelogs/fragments/7951-fix-redfish_info-exception.yml delete mode 100644 changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml delete mode 100644 changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml delete mode 100644 changelogs/fragments/7963-fix-terraform-diff-absent.yml delete mode 100644 changelogs/fragments/7970-fix-cargo-path-idempotency.yaml delete mode 100644 changelogs/fragments/7976-add-mssql_script-transactional-support.yml delete mode 100644 changelogs/fragments/7983-sudoers-add-support-noexec.yml delete mode 100644 changelogs/fragments/7994-bitwarden-session-arg.yaml delete mode 100644 changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml delete mode 100644 changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml delete mode 100644 changelogs/fragments/8003-redfish-get-update-status-empty-response.yml delete mode 100644 changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml delete mode 100644 
changelogs/fragments/8029-iptables-state-restore-check-mode.yml delete mode 100644 changelogs/fragments/8038-proxmox-startup.yml delete mode 100644 changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml delete mode 100644 changelogs/fragments/8057-pam_limits-check-mode.yml delete mode 100644 changelogs/fragments/8073-ldap-attrs-diff.yml delete mode 100644 changelogs/fragments/8075-optional-space-around-section-names.yaml delete mode 100644 changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml delete mode 100644 changelogs/fragments/8091-consul-token-fixes.yaml delete mode 100644 changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml delete mode 100644 changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml delete mode 100644 changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml delete mode 100644 changelogs/fragments/8126-filesystem-bcachefs-support.yaml delete mode 100644 changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml delete mode 100644 changelogs/fragments/8151-fix-lsvg_cmd-failed.yml delete mode 100644 changelogs/fragments/8153-java_cert-add-cert_content-arg.yml delete mode 100644 changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml delete mode 100644 changelogs/fragments/8158-gitlab-version-check.yml delete mode 100644 changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml delete mode 100644 changelogs/fragments/8166-password-store-lookup-missing-subkey.yml delete mode 100644 changelogs/fragments/8169-lxml.yml delete mode 100644 changelogs/fragments/8173-osx_defaults-check_type.yml delete mode 100644 changelogs/fragments/8175-get_ipa_version_regex.yml delete mode 100644 changelogs/fragments/8183-from_ini_to_ini.yml delete mode 100644 changelogs/fragments/8188-bitwarden-add-organization_id.yml delete mode 100644 changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml delete mode 100644 
changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml delete mode 100644 changelogs/fragments/8211-riak-admin-sub-command-support.yml delete mode 100644 changelogs/fragments/8215-add-docker-v2-protocol.yml delete mode 100644 changelogs/fragments/8222-datetime.yml delete mode 100644 changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml delete mode 100644 changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml delete mode 100644 changelogs/fragments/8225-unsafe.yml delete mode 100644 changelogs/fragments/8226-mh-vardict.yml delete mode 100644 changelogs/fragments/8236-portage-select-feature.yml delete mode 100644 changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml delete mode 100644 changelogs/fragments/8247-apt_rpm-latest.yml delete mode 100644 changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml delete mode 100644 changelogs/fragments/8263-apt_rpm-install-check.yml delete mode 100644 changelogs/fragments/8264-run_command.yml delete mode 100644 changelogs/fragments/8274-homebrew-force-formula.yml delete mode 100644 changelogs/fragments/8280-mh-deprecations.yml delete mode 100644 changelogs/fragments/8281-puppet-waitforlock.yaml delete mode 100644 changelogs/fragments/8285-apt_rpm-state-deprecate.yml delete mode 100644 changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml delete mode 100644 changelogs/fragments/8289-python-runner.yml delete mode 100644 changelogs/fragments/8290-gandi-livedns-personal-access-token.yml delete mode 100644 changelogs/fragments/8303-fix-rendering-foreign-variables.yaml delete mode 100644 changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml delete mode 100644 changelogs/fragments/8321-fix-opentelemetry-callback.yml delete mode 100644 changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml delete mode 100644 changelogs/fragments/8334-proxmox-action-group.yml delete mode 100644 
changelogs/fragments/8355-keycloak-idp-sanitize.yaml delete mode 100644 changelogs/fragments/8363-opentelemetry-export-to-a-file.yml delete mode 100644 changelogs/fragments/8367-fix-close-span-if-no-logs.yaml delete mode 100644 changelogs/fragments/8373-honour-disable-logs.yaml delete mode 100644 changelogs/fragments/8379-verbose-mode-pkg5.yml delete mode 100644 changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml delete mode 100644 changelogs/fragments/9.0.0.yml delete mode 100644 changelogs/fragments/add-ipa-sudorule-deny-cmd.yml delete mode 100644 changelogs/fragments/aix_filesystem-crfs-issue.yml delete mode 100644 changelogs/fragments/bitwarden-lookup-performance.yaml delete mode 100644 changelogs/fragments/hipchat.yml delete mode 100644 changelogs/fragments/internal-redirects.yml delete mode 100644 changelogs/fragments/inventory-rce.yml delete mode 100644 changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml delete mode 100644 changelogs/fragments/lxd-instances-api-endpoint-added.yml delete mode 100644 changelogs/fragments/pacemaker-cluster.yml delete mode 100644 changelogs/fragments/pkgin.yml delete mode 100644 changelogs/fragments/puppet_lang_force.yml delete mode 100644 changelogs/fragments/remove_deprecated.yml diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 02bd8e7803..8d0ae9aa6d 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,2 @@ -ancestor: 8.0.0 +ancestor: 9.0.0 releases: {} diff --git a/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml b/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml deleted file mode 100644 index 64390308d7..0000000000 --- a/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml +++ /dev/null @@ -1,6 +0,0 @@ -bugfixes: - - | - redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4 and - greater; older versions of RHEL 7 do not have it - 
(https://github.com/ansible-collections/community.general/issues/7622, - https://github.com/ansible-collections/community.general/pull/7624). diff --git a/changelogs/fragments/5588-support-1password-connect.yml b/changelogs/fragments/5588-support-1password-connect.yml deleted file mode 100644 index bec2300d3f..0000000000 --- a/changelogs/fragments/5588-support-1password-connect.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - onepassword lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116). - - onepassword_raw lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116) diff --git a/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml b/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml deleted file mode 100644 index 4382851d68..0000000000 --- a/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572). diff --git a/changelogs/fragments/7143-proxmox-template.yml b/changelogs/fragments/7143-proxmox-template.yml deleted file mode 100644 index 89d44594d3..0000000000 --- a/changelogs/fragments/7143-proxmox-template.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion of container to a template (https://github.com/ansible-collections/community.general/pull/7143). - - proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143). 
diff --git a/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml b/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml deleted file mode 100644 index 2fa50a47ee..0000000000 --- a/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_authz_permission - resource payload variable for scope-based permission was constructed as a string, when it needs to be a list, even for a single item (https://github.com/ansible-collections/community.general/issues/7151). diff --git a/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml b/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml deleted file mode 100644 index d4c5f96f9d..0000000000 --- a/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199). diff --git a/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml b/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml deleted file mode 100644 index be3dfdcac9..0000000000 --- a/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - "git_config - allow multiple git configs for the same name with the new ``add_mode`` option (https://github.com/ansible-collections/community.general/pull/7260)." - - "git_config - the ``after`` and ``before`` fields in the ``diff`` of the return value can be a list instead of a string in case more configs with the same key are affected (https://github.com/ansible-collections/community.general/pull/7260)." - - "git_config - when a value is unset, all configs with the same key are unset (https://github.com/ansible-collections/community.general/pull/7260)." 
diff --git a/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml b/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml deleted file mode 100644 index f5f07dc230..0000000000 --- a/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389). \ No newline at end of file diff --git a/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml b/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml deleted file mode 100644 index 30f3673499..0000000000 --- a/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - keycloak_identity_provider - it was not possible to reconfigure (add, remove) ``mappers`` once they were created initially. Removal was ignored, adding new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418). - - keycloak_identity_provider - ``mappers`` processing was not idempotent if the mappers configuration list had not been sorted by name (in ascending order). Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418). 
\ No newline at end of file diff --git a/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml b/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml deleted file mode 100644 index 59e22b450f..0000000000 --- a/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify the stored password format (https://github.com/ansible-collections/community.general/pull/7426). \ No newline at end of file diff --git a/changelogs/fragments/7456-add-ssh-control-master.yml b/changelogs/fragments/7456-add-ssh-control-master.yml deleted file mode 100644 index de6399e2bd..0000000000 --- a/changelogs/fragments/7456-add-ssh-control-master.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist`` parameters (https://github.com/ansible-collections/community.general/pull/7456). diff --git a/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml b/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml deleted file mode 100644 index 40391342f7..0000000000 --- a/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - adds an option to exclude nodes from the dynamic inventory generation. The new setting is optional, not using this option will behave as usual (https://github.com/ansible-collections/community.general/issues/6714, https://github.com/ansible-collections/community.general/pull/7461). 
diff --git a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml deleted file mode 100644 index 20a9b1d144..0000000000 --- a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_ostype - it is now possible to specify the ``ostype`` when creating an LXC container (https://github.com/ansible-collections/community.general/pull/7462). diff --git a/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml b/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml deleted file mode 100644 index b42abc88c0..0000000000 --- a/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_pool_member - absent state for type VM did not delete VMs from the pools (https://github.com/ansible-collections/community.general/pull/7464). diff --git a/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml b/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml deleted file mode 100644 index 01a98c2225..0000000000 --- a/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate`` commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465, https://github.com/ansible-collections/community.general/pull/7471). 
diff --git a/changelogs/fragments/7467-fix-gitlab-constants-calls.yml b/changelogs/fragments/7467-fix-gitlab-constants-calls.yml deleted file mode 100644 index 77466f75e6..0000000000 --- a/changelogs/fragments/7467-fix-gitlab-constants-calls.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467). diff --git a/changelogs/fragments/7472-gitlab-add-ca-path-option.yml b/changelogs/fragments/7472-gitlab-add-ca-path-option.yml deleted file mode 100644 index 48c041ea31..0000000000 --- a/changelogs/fragments/7472-gitlab-add-ca-path-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472). diff --git a/changelogs/fragments/7485-proxmox_vm_info-config.yml b/changelogs/fragments/7485-proxmox_vm_info-config.yml deleted file mode 100644 index ca2fd3dc57..0000000000 --- a/changelogs/fragments/7485-proxmox_vm_info-config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485). 
diff --git a/changelogs/fragments/7486-gitlab-refactor-package-check.yml b/changelogs/fragments/7486-gitlab-refactor-package-check.yml deleted file mode 100644 index 25b52ac45c..0000000000 --- a/changelogs/fragments/7486-gitlab-refactor-package-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486). diff --git a/changelogs/fragments/7489-netcup-dns-record-types.yml b/changelogs/fragments/7489-netcup-dns-record-types.yml deleted file mode 100644 index b065a4d239..0000000000 --- a/changelogs/fragments/7489-netcup-dns-record-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489). \ No newline at end of file diff --git a/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml b/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml deleted file mode 100644 index f3a5b27609..0000000000 --- a/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495). diff --git a/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml b/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml deleted file mode 100644 index f12aa55760..0000000000 --- a/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7499). 
diff --git a/changelogs/fragments/7501-type.yml b/changelogs/fragments/7501-type.yml deleted file mode 100644 index 994c31ce5a..0000000000 --- a/changelogs/fragments/7501-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()`` calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501)." diff --git a/changelogs/fragments/7505-ini_file-section_has.yml b/changelogs/fragments/7505-ini_file-section_has.yml deleted file mode 100644 index 0424764fd0..0000000000 --- a/changelogs/fragments/7505-ini_file-section_has.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - "ini_file - add an optional parameter ``section_has_values``. If the - target ini file contains more than one ``section``, use ``section_has_values`` - to specify which one should be updated - (https://github.com/ansible-collections/community.general/pull/7505)." diff --git a/changelogs/fragments/7506-pipx-pipargs.yml b/changelogs/fragments/7506-pipx-pipargs.yml deleted file mode 100644 index fb5cb52e6f..0000000000 --- a/changelogs/fragments/7506-pipx-pipargs.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pipx module utils - change the CLI argument formatter for the ``pip_args`` parameter (https://github.com/ansible-collections/community.general/issues/7497, https://github.com/ansible-collections/community.general/pull/7506). diff --git a/changelogs/fragments/7517-elastic-close-client.yaml b/changelogs/fragments/7517-elastic-close-client.yaml deleted file mode 100644 index ee383d26a6..0000000000 --- a/changelogs/fragments/7517-elastic-close-client.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - elastic callback plugin - close elastic client to not leak resources (https://github.com/ansible-collections/community.general/pull/7517). 
diff --git a/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml b/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml deleted file mode 100644 index b991522dd6..0000000000 --- a/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535)." diff --git a/changelogs/fragments/7538-add-krbprincipalattribute-option.yml b/changelogs/fragments/7538-add-krbprincipalattribute-option.yml deleted file mode 100644 index e2e2ce61c2..0000000000 --- a/changelogs/fragments/7538-add-krbprincipalattribute-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538). diff --git a/changelogs/fragments/7540-proxmox-update-config.yml b/changelogs/fragments/7540-proxmox-update-config.yml deleted file mode 100644 index d89c26115f..0000000000 --- a/changelogs/fragments/7540-proxmox-update-config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - adds ``update`` parameter, allowing update of an already existing containers configuration (https://github.com/ansible-collections/community.general/pull/7540). diff --git a/changelogs/fragments/7542-irc-logentries-ssl.yml b/changelogs/fragments/7542-irc-logentries-ssl.yml deleted file mode 100644 index 6897087dfb..0000000000 --- a/changelogs/fragments/7542-irc-logentries-ssl.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542)." 
- - "irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542)." diff --git a/changelogs/fragments/7550-irc-use_tls-validate_certs.yml b/changelogs/fragments/7550-irc-use_tls-validate_certs.yml deleted file mode 100644 index 0c99d8fd6f..0000000000 --- a/changelogs/fragments/7550-irc-use_tls-validate_certs.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - "irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``, while keeping ``use_ssl`` as an alias. - The default value for ``validate_certs`` is ``false`` for backwards compatibility. We recommend to every user of - this module to explicitly set ``use_tls=true`` and `validate_certs=true`` whenever possible, especially when - communicating to IRC servers over the internet (https://github.com/ansible-collections/community.general/pull/7550)." diff --git a/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml b/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml deleted file mode 100644 index d2eaf2ff11..0000000000 --- a/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - >- - onepassword lookup plugin - field and section titles are now case insensitive when using - op CLI version two or later. This matches the behavior of version one (https://github.com/ansible-collections/community.general/pull/7564). diff --git a/changelogs/fragments/7569-infiniband-slave-support.yml b/changelogs/fragments/7569-infiniband-slave-support.yml deleted file mode 100644 index f54460842d..0000000000 --- a/changelogs/fragments/7569-infiniband-slave-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569). 
diff --git a/changelogs/fragments/7577-fix-apt_rpm-module.yml b/changelogs/fragments/7577-fix-apt_rpm-module.yml deleted file mode 100644 index ef55eb5bd2..0000000000 --- a/changelogs/fragments/7577-fix-apt_rpm-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - apt-rpm - the module did not upgrade packages if a newer version exists. Now the package will be reinstalled if the candidate is newer than the installed version (https://github.com/ansible-collections/community.general/issues/7414). diff --git a/changelogs/fragments/7578-irc-tls.yml b/changelogs/fragments/7578-irc-tls.yml deleted file mode 100644 index a7fcbbca29..0000000000 --- a/changelogs/fragments/7578-irc-tls.yml +++ /dev/null @@ -1,4 +0,0 @@ -deprecated_features: - - "irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have been deprecated and will change to ``true`` in community.general 10.0.0 - to improve security. You can already improve security now by explicitly setting them to ``true``. Specifying values now disables the deprecation - warning (https://github.com/ansible-collections/community.general/pull/7578)." diff --git a/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml b/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml deleted file mode 100644 index c9d83c761a..0000000000 --- a/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_config - adds ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7588). 
diff --git a/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml b/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml deleted file mode 100644 index bf584514ae..0000000000 --- a/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_user - adds ``idp`` and ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7589). diff --git a/changelogs/fragments/7600-proxmox_kvm-hookscript.yml b/changelogs/fragments/7600-proxmox_kvm-hookscript.yml deleted file mode 100644 index 5d79e71657..0000000000 --- a/changelogs/fragments/7600-proxmox_kvm-hookscript.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600)." diff --git a/changelogs/fragments/7601-lvol-fix.yml b/changelogs/fragments/7601-lvol-fix.yml deleted file mode 100644 index b83fe15683..0000000000 --- a/changelogs/fragments/7601-lvol-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601, https://github.com/ansible-collections/community.general/issues/7182). diff --git a/changelogs/fragments/7612-interface_file-method.yml b/changelogs/fragments/7612-interface_file-method.yml deleted file mode 100644 index 38fcb71503..0000000000 --- a/changelogs/fragments/7612-interface_file-method.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "interface_files - also consider ``address_family`` when changing ``option=method`` (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612)." 
diff --git a/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml b/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml deleted file mode 100644 index 919383686b..0000000000 --- a/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - adding the ``BootProgress`` property when getting ``Systems`` info (https://github.com/ansible-collections/community.general/pull/7626). diff --git a/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml b/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml deleted file mode 100644 index c11cbf3b06..0000000000 --- a/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641). diff --git a/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml b/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml deleted file mode 100644 index 509ab0fd81..0000000000 --- a/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak module utils - expose error message from Keycloak server for HTTP errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645). \ No newline at end of file diff --git a/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml b/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml deleted file mode 100644 index f2d2379872..0000000000 --- a/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ldap - previously the order number (if present) was expected to follow an equals sign in the DN. 
This makes it so the order number string is identified correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646). diff --git a/changelogs/fragments/7653-fix-cloudflare-lookup.yml b/changelogs/fragments/7653-fix-cloudflare-lookup.yml deleted file mode 100644 index f370a1c1d1..0000000000 --- a/changelogs/fragments/7653-fix-cloudflare-lookup.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cloudflare_dns - fix Cloudflare lookup of SHFP records (https://github.com/ansible-collections/community.general/issues/7652). diff --git a/changelogs/fragments/7676-lvol-pvs-as-list.yml b/changelogs/fragments/7676-lvol-pvs-as-list.yml deleted file mode 100644 index aa28fff59d..0000000000 --- a/changelogs/fragments/7676-lvol-pvs-as-list.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676, https://github.com/ansible-collections/community.general/issues/7504). diff --git a/changelogs/fragments/7683-added-contenttype-parameter.yml b/changelogs/fragments/7683-added-contenttype-parameter.yml deleted file mode 100644 index 52f4b6b0c5..0000000000 --- a/changelogs/fragments/7683-added-contenttype-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684). \ No newline at end of file diff --git a/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml b/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml deleted file mode 100644 index db57d68233..0000000000 --- a/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user - when ``force`` is set, but user does not exist, do not try to delete it (https://github.com/ansible-collections/community.general/pull/7696). 
diff --git a/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml b/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml deleted file mode 100644 index 0cd996c510..0000000000 --- a/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - keycloak_realm_key - the ``provider_id`` option now supports RSA encryption key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698). - - keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional key algorithms (https://github.com/ansible-collections/community.general/pull/7698). - - keycloak_realm_key - the ``config.certificate`` option value is no longer defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698). \ No newline at end of file diff --git a/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml b/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml deleted file mode 100644 index 99893a0ff3..0000000000 --- a/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7703). diff --git a/changelogs/fragments/7704-ssh_config_identities_only_option.yml b/changelogs/fragments/7704-ssh_config_identities_only_option.yml deleted file mode 100644 index 9efa10b70f..0000000000 --- a/changelogs/fragments/7704-ssh_config_identities_only_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7704). 
diff --git a/changelogs/fragments/7717-prevent-modprobe-error.yml b/changelogs/fragments/7717-prevent-modprobe-error.yml deleted file mode 100644 index bfef30e67b..0000000000 --- a/changelogs/fragments/7717-prevent-modprobe-error.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - modprobe - listing modules files or modprobe files could trigger a FileNotFoundError if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module (https://github.com/ansible-collections/community.general/issues/7717). diff --git a/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml b/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml deleted file mode 100644 index bffd40efcd..0000000000 --- a/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``, ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password policies (https://github.com/ansible-collections/community.general/pull/7723). - - ipa_pwpolicy - refactor module and exchange a sequence ``if`` statements with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723). diff --git a/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml b/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml deleted file mode 100644 index 534d96e123..0000000000 --- a/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737). 
diff --git a/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml b/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml deleted file mode 100644 index 1c142b62ef..0000000000 --- a/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - mail - add ``Message-ID`` header; which is required by some mail servers (https://github.com/ansible-collections/community.general/pull/7740). diff --git a/changelogs/fragments/7746-raw_post-without-actions.yml b/changelogs/fragments/7746-raw_post-without-actions.yml deleted file mode 100644 index 10dc110c5e..0000000000 --- a/changelogs/fragments/7746-raw_post-without-actions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - xcc_redfish_command - added support for raw POSTs (``command=PostResource`` in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746). diff --git a/changelogs/fragments/7754-fixed-payload-format.yml b/changelogs/fragments/7754-fixed-payload-format.yml deleted file mode 100644 index 01458053e5..0000000000 --- a/changelogs/fragments/7754-fixed-payload-format.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - statusio_maintenance - fix error caused by incorrectly formed API data payload. Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754). 
\ No newline at end of file diff --git a/changelogs/fragments/7765-mail-message-id.yml b/changelogs/fragments/7765-mail-message-id.yml deleted file mode 100644 index 54af767ecf..0000000000 --- a/changelogs/fragments/7765-mail-message-id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "mail module, mail callback plugin - allow to configure the domain name of the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765)." diff --git a/changelogs/fragments/7782-cloudflare_dns-spf.yml b/changelogs/fragments/7782-cloudflare_dns-spf.yml deleted file mode 100644 index 83e7fe79bb..0000000000 --- a/changelogs/fragments/7782-cloudflare_dns-spf.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "cloudflare_dns - remove support for SPF records. These are no longer supported by CloudFlare (https://github.com/ansible-collections/community.general/pull/7782)." diff --git a/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml b/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml deleted file mode 100644 index dd20a4ea18..0000000000 --- a/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - allow custom user storage providers to be set through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789). diff --git a/changelogs/fragments/7790-gitlab-runner-api-pagination.yml b/changelogs/fragments/7790-gitlab-runner-api-pagination.yml deleted file mode 100644 index 59a65ea8ef..0000000000 --- a/changelogs/fragments/7790-gitlab-runner-api-pagination.yml +++ /dev/null @@ -1,8 +0,0 @@ -bugfixes: - - gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790). 
- -minor_changes: - - gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook, - gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable, - gitlab_user - improve API pagination and compatibility with different versions - of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790). diff --git a/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml b/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml deleted file mode 100644 index 1e061ce6af..0000000000 --- a/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - running ``state=template`` will first check whether VM is already a template (https://github.com/ansible-collections/community.general/pull/7792). diff --git a/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml b/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml deleted file mode 100644 index 43fd4f5251..0000000000 --- a/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_otptoken - the module expect ``ipatokendisabled`` as string but the ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795). diff --git a/changelogs/fragments/7821-mssql_script-py2.yml b/changelogs/fragments/7821-mssql_script-py2.yml deleted file mode 100644 index 79de688628..0000000000 --- a/changelogs/fragments/7821-mssql_script-py2.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818, https://github.com/ansible-collections/community.general/pull/7821)." 
diff --git a/changelogs/fragments/7826-consul-modules-refactoring.yaml b/changelogs/fragments/7826-consul-modules-refactoring.yaml deleted file mode 100644 index a51352d88e..0000000000 --- a/changelogs/fragments/7826-consul-modules-refactoring.yaml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - 'consul_policy, consul_role, consul_session - removed dependency on ``requests`` and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826, https://github.com/ansible-collections/community.general/pull/7878).' - - consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - ``service_identities`` now expects a ``service_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - ``node_identities`` now expects a ``node_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878). \ No newline at end of file diff --git a/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml b/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml deleted file mode 100644 index dcb1ebb218..0000000000 --- a/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843). 
diff --git a/changelogs/fragments/7847-gitlab-issue-title.yml b/changelogs/fragments/7847-gitlab-issue-title.yml deleted file mode 100644 index c8b8e49905..0000000000 --- a/changelogs/fragments/7847-gitlab-issue-title.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846). diff --git a/changelogs/fragments/7870-homebrew-cask-installed-detection.yml b/changelogs/fragments/7870-homebrew-cask-installed-detection.yml deleted file mode 100644 index 1c70c9a2d4..0000000000 --- a/changelogs/fragments/7870-homebrew-cask-installed-detection.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew - detect already installed formulae and casks using JSON output from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864). diff --git a/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml b/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml deleted file mode 100644 index 82b4fe31d9..0000000000 --- a/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox - fix updating a container config if the setting does not already exist (https://github.com/ansible-collections/community.general/pull/7872). diff --git a/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml b/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml deleted file mode 100644 index 83d302e9b9..0000000000 --- a/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "incus connection plugin - treats ``inventory_hostname`` as a variable instead of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874)." 
diff --git a/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml b/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml deleted file mode 100644 index cb2caa3780..0000000000 --- a/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). - - ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). diff --git a/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml b/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml deleted file mode 100644 index 485950c11c..0000000000 --- a/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_client - fixes issue when metadata is provided in desired state when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226, https://github.com/ansible-collections/community.general/pull/7881). \ No newline at end of file diff --git a/changelogs/fragments/7882-add-redfish-get-service-identification.yml b/changelogs/fragments/7882-add-redfish-get-service-identification.yml deleted file mode 100644 index 463c9a2bc5..0000000000 --- a/changelogs/fragments/7882-add-redfish-get-service-identification.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - add command ``GetServiceIdentification`` to get service identification (https://github.com/ansible-collections/community.general/issues/7882). 
diff --git a/changelogs/fragments/7896-add-terraform-diff-mode.yml b/changelogs/fragments/7896-add-terraform-diff-mode.yml deleted file mode 100644 index 7c0834efa5..0000000000 --- a/changelogs/fragments/7896-add-terraform-diff-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - add support for ``diff_mode`` for terraform resource_changes (https://github.com/ansible-collections/community.general/pull/7896). diff --git a/changelogs/fragments/7897-consul-action-group.yaml b/changelogs/fragments/7897-consul-action-group.yaml deleted file mode 100644 index 1764e1970d..0000000000 --- a/changelogs/fragments/7897-consul-action-group.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session, consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897). diff --git a/changelogs/fragments/7901-consul-acl-deprecation.yaml b/changelogs/fragments/7901-consul-acl-deprecation.yaml deleted file mode 100644 index 9480b04ce9..0000000000 --- a/changelogs/fragments/7901-consul-acl-deprecation.yaml +++ /dev/null @@ -1,3 +0,0 @@ -deprecated_features: - - "consul_acl - the module has been deprecated and will be removed in community.general 10.0.0. ``consul_token`` and ``consul_policy`` - can be used instead (https://github.com/ansible-collections/community.general/pull/7901)." \ No newline at end of file diff --git a/changelogs/fragments/7916-add-redfish-set-service-identification.yml b/changelogs/fragments/7916-add-redfish-set-service-identification.yml deleted file mode 100644 index 2b1f2ca7b3..0000000000 --- a/changelogs/fragments/7916-add-redfish-set-service-identification.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_config - add command ``SetServiceIdentification`` to set service identification (https://github.com/ansible-collections/community.general/issues/7916). 
diff --git a/changelogs/fragments/7919-onepassword-fieldname-casing.yaml b/changelogs/fragments/7919-onepassword-fieldname-casing.yaml deleted file mode 100644 index 9119f896f0..0000000000 --- a/changelogs/fragments/7919-onepassword-fieldname-casing.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - onepassword lookup plugin - failed for fields that were in sections and had uppercase letters in the label/ID. Field lookups are now case insensitive in all cases (https://github.com/ansible-collections/community.general/pull/7919). diff --git a/changelogs/fragments/7951-fix-redfish_info-exception.yml b/changelogs/fragments/7951-fix-redfish_info-exception.yml deleted file mode 100644 index cd5707da4b..0000000000 --- a/changelogs/fragments/7951-fix-redfish_info-exception.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "redfish_info - correct uncaught exception when attempting to retrieve ``Chassis`` information (https://github.com/ansible-collections/community.general/pull/7952)." diff --git a/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml b/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml deleted file mode 100644 index 10f8e6d26a..0000000000 --- a/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - fixed status check getting from node-specific API endpoint (https://github.com/ansible-collections/community.general/issues/7817). diff --git a/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml b/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml deleted file mode 100644 index 30186804d4..0000000000 --- a/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_hook - adds ``releases_events`` parameter for supporting Releases events triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956). 
\ No newline at end of file diff --git a/changelogs/fragments/7963-fix-terraform-diff-absent.yml b/changelogs/fragments/7963-fix-terraform-diff-absent.yml deleted file mode 100644 index 4e2cf53c9b..0000000000 --- a/changelogs/fragments/7963-fix-terraform-diff-absent.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes`` does not exist (https://github.com/ansible-collections/community.general/pull/7963). diff --git a/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml b/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml deleted file mode 100644 index 143247bc91..0000000000 --- a/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml +++ /dev/null @@ -1,10 +0,0 @@ -bugfixes: - - "cargo - fix idempotency issues when using a custom installation path - for packages (using the ``--path`` parameter). - The initial installation runs fine, but subsequent runs use the - ``get_installed()`` function which did not check the given installation - location, before running ``cargo install``. This resulted in a false - ``changed`` state. - Also the removal of packeges using ``state: absent`` failed, as the - installation check did not use the given parameter - (https://github.com/ansible-collections/community.general/pull/7970)." diff --git a/changelogs/fragments/7976-add-mssql_script-transactional-support.yml b/changelogs/fragments/7976-add-mssql_script-transactional-support.yml deleted file mode 100644 index dc6f335247..0000000000 --- a/changelogs/fragments/7976-add-mssql_script-transactional-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - mssql_script - adds transactional (rollback/commit) support via optional boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976). 
diff --git a/changelogs/fragments/7983-sudoers-add-support-noexec.yml b/changelogs/fragments/7983-sudoers-add-support-noexec.yml deleted file mode 100644 index f58e6f7ec8..0000000000 --- a/changelogs/fragments/7983-sudoers-add-support-noexec.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983). diff --git a/changelogs/fragments/7994-bitwarden-session-arg.yaml b/changelogs/fragments/7994-bitwarden-session-arg.yaml deleted file mode 100644 index 36f9622ac0..0000000000 --- a/changelogs/fragments/7994-bitwarden-session-arg.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - add ``bw_session`` option, to pass session key instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994)." diff --git a/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml b/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml deleted file mode 100644 index 9998583b83..0000000000 --- a/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``, and ``password`` paramenters (https://github.com/ansible-collections/community.general/issues/7074, https://github.com/ansible-collections/community.general/pull/7996). 
\ No newline at end of file diff --git a/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml b/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml deleted file mode 100644 index 1170a108fd..0000000000 --- a/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998). \ No newline at end of file diff --git a/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml b/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml deleted file mode 100644 index 21796e7a0e..0000000000 --- a/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info - allow for a GET operation invoked by ``GetUpdateStatus`` to allow for an empty response body for cases where a service returns 204 No Content (https://github.com/ansible-collections/community.general/issues/8003). diff --git a/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml b/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml deleted file mode 100644 index 7337233aea..0000000000 --- a/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - allows to fetch all records of a given collection ID, by allowing to pass an empty value for ``search_value`` when ``collection_id`` is provided (https://github.com/ansible-collections/community.general/pull/8013)." 
diff --git a/changelogs/fragments/8029-iptables-state-restore-check-mode.yml b/changelogs/fragments/8029-iptables-state-restore-check-mode.yml deleted file mode 100644 index 900ea50988..0000000000 --- a/changelogs/fragments/8029-iptables-state-restore-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - iptables_state - fix idempotency issues when restoring incomplete iptables dumps (https://github.com/ansible-collections/community.general/issues/8029). diff --git a/changelogs/fragments/8038-proxmox-startup.yml b/changelogs/fragments/8038-proxmox-startup.yml deleted file mode 100644 index f8afbc0c4e..0000000000 --- a/changelogs/fragments/8038-proxmox-startup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - adds ``startup`` parameters to configure startup order, startup delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038). diff --git a/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml b/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml deleted file mode 100644 index 9954be302a..0000000000 --- a/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew - error returned from brew command was ignored and tried to parse empty JSON. Fix now checks for an error and raises it to give accurate error message to users (https://github.com/ansible-collections/community.general/issues/8047). diff --git a/changelogs/fragments/8057-pam_limits-check-mode.yml b/changelogs/fragments/8057-pam_limits-check-mode.yml deleted file mode 100644 index f6f034e9b8..0000000000 --- a/changelogs/fragments/8057-pam_limits-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "pam_limits - when the file does not exist, do not create it in check mode (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057)." 
diff --git a/changelogs/fragments/8073-ldap-attrs-diff.yml b/changelogs/fragments/8073-ldap-attrs-diff.yml deleted file mode 100644 index 071fc2919e..0000000000 --- a/changelogs/fragments/8073-ldap-attrs-diff.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ldap_attrs - module now supports diff mode, showing which attributes are changed within an operation (https://github.com/ansible-collections/community.general/pull/8073). \ No newline at end of file diff --git a/changelogs/fragments/8075-optional-space-around-section-names.yaml b/changelogs/fragments/8075-optional-space-around-section-names.yaml deleted file mode 100644 index 2e44555f08..0000000000 --- a/changelogs/fragments/8075-optional-space-around-section-names.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "ini_file - support optional spaces between section names and their surrounding brackets (https://github.com/ansible-collections/community.general/pull/8075)." diff --git a/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml b/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml deleted file mode 100644 index 1224ebdfa2..0000000000 --- a/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "revbitspss lookup plugin - removed a redundant unicode prefix. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code (https://github.com/ansible-collections/community.general/pull/8087)." diff --git a/changelogs/fragments/8091-consul-token-fixes.yaml b/changelogs/fragments/8091-consul-token-fixes.yaml deleted file mode 100644 index c734623588..0000000000 --- a/changelogs/fragments/8091-consul-token-fixes.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091)." 
\ No newline at end of file diff --git a/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml b/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml deleted file mode 100644 index 58f1478914..0000000000 --- a/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092)." diff --git a/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml b/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml deleted file mode 100644 index f36c145d74..0000000000 --- a/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - java_cert - enable ``owner``, ``group``, ``mode``, and other generic file arguments (https://github.com/ansible-collections/community.general/pull/8116). \ No newline at end of file diff --git a/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml b/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml deleted file mode 100644 index 47f8af9ac3..0000000000 --- a/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118). diff --git a/changelogs/fragments/8126-filesystem-bcachefs-support.yaml b/changelogs/fragments/8126-filesystem-bcachefs-support.yaml deleted file mode 100644 index 32ff5c64da..0000000000 --- a/changelogs/fragments/8126-filesystem-bcachefs-support.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126). 
diff --git a/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml b/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml deleted file mode 100644 index 755d7ed4fe..0000000000 --- a/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - linode inventory plugin - add descriptive error message for linode inventory plugin (https://github.com/ansible-collections/community.general/pull/8133). - diff --git a/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml b/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml deleted file mode 100644 index 0eeee752df..0000000000 --- a/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` (https://github.com/ansible-collections/community.general/issues/8151). diff --git a/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml b/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml deleted file mode 100644 index 40ae1f84a4..0000000000 --- a/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153). diff --git a/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml b/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml deleted file mode 100644 index d1fb344ba5..0000000000 --- a/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``, ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` (https://github.com/ansible-collections/community.general/pull/8154). 
\ No newline at end of file diff --git a/changelogs/fragments/8158-gitlab-version-check.yml b/changelogs/fragments/8158-gitlab-version-check.yml deleted file mode 100644 index 046bca938f..0000000000 --- a/changelogs/fragments/8158-gitlab-version-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158)." diff --git a/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml b/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml deleted file mode 100644 index 212ecc9fd8..0000000000 --- a/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add command ``ResetToDefaults`` to reset manager to default state (https://github.com/ansible-collections/community.general/issues/8163). diff --git a/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml b/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml deleted file mode 100644 index da5be9c9e0..0000000000 --- a/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore lookup - add ``missing_subkey`` parameter defining the behavior of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166). diff --git a/changelogs/fragments/8169-lxml.yml b/changelogs/fragments/8169-lxml.yml deleted file mode 100644 index e2c1b8b952..0000000000 --- a/changelogs/fragments/8169-lxml.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "xml - make module work with lxml 5.1.1, which removed some internals that the module was relying on (https://github.com/ansible-collections/community.general/pull/8169)." 
diff --git a/changelogs/fragments/8173-osx_defaults-check_type.yml b/changelogs/fragments/8173-osx_defaults-check_type.yml deleted file mode 100644 index a35f609bf3..0000000000 --- a/changelogs/fragments/8173-osx_defaults-check_type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - osx_defaults - add option ``check_types`` to enable changing the type of existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173). diff --git a/changelogs/fragments/8175-get_ipa_version_regex.yml b/changelogs/fragments/8175-get_ipa_version_regex.yml deleted file mode 100644 index e2a51d1b91..0000000000 --- a/changelogs/fragments/8175-get_ipa_version_regex.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175). diff --git a/changelogs/fragments/8183-from_ini_to_ini.yml b/changelogs/fragments/8183-from_ini_to_ini.yml deleted file mode 100644 index 1ff455f6ee..0000000000 --- a/changelogs/fragments/8183-from_ini_to_ini.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." - - "from_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." 
diff --git a/changelogs/fragments/8188-bitwarden-add-organization_id.yml b/changelogs/fragments/8188-bitwarden-add-organization_id.yml deleted file mode 100644 index c57ba3a479..0000000000 --- a/changelogs/fragments/8188-bitwarden-add-organization_id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188). diff --git a/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml b/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml deleted file mode 100644 index 6b96d98a7f..0000000000 --- a/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195). diff --git a/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml b/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml deleted file mode 100644 index b621fe284c..0000000000 --- a/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox_kvm - adds``usb`` parameter for setting USB devices on proxmox KVM VMs (https://github.com/ansible-collections/community.general/pull/8199)." diff --git a/changelogs/fragments/8211-riak-admin-sub-command-support.yml b/changelogs/fragments/8211-riak-admin-sub-command-support.yml deleted file mode 100644 index dc6eb00e45..0000000000 --- a/changelogs/fragments/8211-riak-admin-sub-command-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "riak - support ``riak admin`` sub-command in newer Riak KV versions beside the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211)." 
\ No newline at end of file diff --git a/changelogs/fragments/8215-add-docker-v2-protocol.yml b/changelogs/fragments/8215-add-docker-v2-protocol.yml deleted file mode 100644 index 6a9cc60556..0000000000 --- a/changelogs/fragments/8215-add-docker-v2-protocol.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, https://github.com/ansible-collections/community.general/pull/8216). diff --git a/changelogs/fragments/8222-datetime.yml b/changelogs/fragments/8222-datetime.yml deleted file mode 100644 index 00bf862186..0000000000 --- a/changelogs/fragments/8222-datetime.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - "Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead of offset-naive UTC timestamps, - which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222)." diff --git a/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml b/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml deleted file mode 100644 index 47f7e6bd7b..0000000000 --- a/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` to normalizations (https://github.com/ansible-collections/community.general/pull/8223). 
diff --git a/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml b/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml deleted file mode 100644 index 0574141f61..0000000000 --- a/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` (https://github.com/ansible-collections/community.general/pull/8224). diff --git a/changelogs/fragments/8225-unsafe.yml b/changelogs/fragments/8225-unsafe.yml deleted file mode 100644 index 496797ef74..0000000000 --- a/changelogs/fragments/8225-unsafe.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "inventory plugins - add unsafe wrapper to avoid marking strings that do not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX ((https://github.com/ansible-collections/community.general/issues/8212, https://github.com/ansible-collections/community.general/pull/8225)." diff --git a/changelogs/fragments/8226-mh-vardict.yml b/changelogs/fragments/8226-mh-vardict.yml deleted file mode 100644 index c7c62c7db0..0000000000 --- a/changelogs/fragments/8226-mh-vardict.yml +++ /dev/null @@ -1,10 +0,0 @@ -deprecated_features: - - ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226). - - ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using the ``VardDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226). -minor_changes: - - gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). 
- - pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). diff --git a/changelogs/fragments/8236-portage-select-feature.yml b/changelogs/fragments/8236-portage-select-feature.yml deleted file mode 100644 index 742d5cc966..0000000000 --- a/changelogs/fragments/8236-portage-select-feature.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - portage - adds the possibility to explicitely tell portage to write packages to world file (https://github.com/ansible-collections/community.general/issues/6226, https://github.com/ansible-collections/community.general/pull/8236). diff --git a/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml b/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml deleted file mode 100644 index b9d80a7cba..0000000000 --- a/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "bitwarden_secrets_manager lookup plugin - implements retry with exponential backoff to avoid lookup errors when Bitwardn's API rate limiting is encountered (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238)." diff --git a/changelogs/fragments/8247-apt_rpm-latest.yml b/changelogs/fragments/8247-apt_rpm-latest.yml deleted file mode 100644 index d62fb40340..0000000000 --- a/changelogs/fragments/8247-apt_rpm-latest.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: - - "apt_rpm - add new states ``latest`` and ``present_not_latest``. 
The value ``latest`` is equivalent to the current behavior of - ``present``, which will upgrade a package if a newer version exists. ``present_not_latest`` does what most users would expect ``present`` - to do: it does not upgrade if the package is already installed. The current behavior of ``present`` will be deprecated in a later version, - and eventually changed to that of ``present_not_latest`` - (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247)." diff --git a/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml b/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml deleted file mode 100644 index ca1d61aefd..0000000000 --- a/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` (https://github.com/ansible-collections/community.general/pull/8257). diff --git a/changelogs/fragments/8263-apt_rpm-install-check.yml b/changelogs/fragments/8263-apt_rpm-install-check.yml deleted file mode 100644 index ae44616e79..0000000000 --- a/changelogs/fragments/8263-apt_rpm-install-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "apt_rpm - when checking whether packages were installed after running ``apt-get -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263)." diff --git a/changelogs/fragments/8264-run_command.yml b/changelogs/fragments/8264-run_command.yml deleted file mode 100644 index dd66cd6123..0000000000 --- a/changelogs/fragments/8264-run_command.yml +++ /dev/null @@ -1,14 +0,0 @@ -minor_changes: - - "aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." 
- - "apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "installp - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "lvg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "macports - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "parted - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "pkgin - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "portinstall - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." 
- - "slackpkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "swdepot - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." diff --git a/changelogs/fragments/8274-homebrew-force-formula.yml b/changelogs/fragments/8274-homebrew-force-formula.yml deleted file mode 100644 index 4a9e471f4c..0000000000 --- a/changelogs/fragments/8274-homebrew-force-formula.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "homebrew - adds ``force_formula`` parameter to disambiguate a formula from a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274)." \ No newline at end of file diff --git a/changelogs/fragments/8280-mh-deprecations.yml b/changelogs/fragments/8280-mh-deprecations.yml deleted file mode 100644 index ae70f96b1e..0000000000 --- a/changelogs/fragments/8280-mh-deprecations.yml +++ /dev/null @@ -1,8 +0,0 @@ -deprecated_features: - - MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` (https://github.com/ansible-collections/community.general/pull/8280). 
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict,`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` (https://github.com/ansible-collections/community.general/pull/8280). diff --git a/changelogs/fragments/8281-puppet-waitforlock.yaml b/changelogs/fragments/8281-puppet-waitforlock.yaml deleted file mode 100644 index bd8a820170..0000000000 --- a/changelogs/fragments/8281-puppet-waitforlock.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282). diff --git a/changelogs/fragments/8285-apt_rpm-state-deprecate.yml b/changelogs/fragments/8285-apt_rpm-state-deprecate.yml deleted file mode 100644 index 19f3415841..0000000000 --- a/changelogs/fragments/8285-apt_rpm-state-deprecate.yml +++ /dev/null @@ -1,7 +0,0 @@ -deprecated_features: - - "apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated and will change in community.general 11.0.0. - Right now the module will upgrade a package to the latest version if one of these two states is used. You should explicitly - use ``state=latest`` if you want this behavior, and switch to ``state=present_not_latest`` if you do not want to upgrade the - package if it is already installed. 
In community.general 11.0.0 the behavior of ``state=present`` and ``state=installed`` will - change to that of ``state=present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, - https://github.com/ansible-collections/community.general/pull/8285)." diff --git a/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml b/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml deleted file mode 100644 index 94de04740b..0000000000 --- a/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd_runner module_utils - add validation for minimum and maximum length in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288). diff --git a/changelogs/fragments/8289-python-runner.yml b/changelogs/fragments/8289-python-runner.yml deleted file mode 100644 index 97a45fd8f3..0000000000 --- a/changelogs/fragments/8289-python-runner.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289). diff --git a/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml b/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml deleted file mode 100644 index 3168bf20fd..0000000000 --- a/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, https://github.com/ansible-collections/community.general/pull/8337). 
diff --git a/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml b/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml deleted file mode 100644 index c2162771f2..0000000000 --- a/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "merge_variables lookup plugin - fixing cross host merge: providing access to foreign hosts variables to the perspective of the host that is performing the merge (https://github.com/ansible-collections/community.general/pull/8303)." diff --git a/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml b/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml deleted file mode 100644 index df4a892733..0000000000 --- a/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320). diff --git a/changelogs/fragments/8321-fix-opentelemetry-callback.yml b/changelogs/fragments/8321-fix-opentelemetry-callback.yml deleted file mode 100644 index a02f12c6b9..0000000000 --- a/changelogs/fragments/8321-fix-opentelemetry-callback.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opentelemetry - add support for HTTP trace_exporter and configures the behavior via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8321). 
diff --git a/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml b/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml deleted file mode 100644 index d29aed5ae4..0000000000 --- a/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "homebrew, homebrew_cask - refactor common argument validation logic into a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, https://github.com/ansible-collections/community.general/pull/8324)." \ No newline at end of file diff --git a/changelogs/fragments/8334-proxmox-action-group.yml b/changelogs/fragments/8334-proxmox-action-group.yml deleted file mode 100644 index 0e5aeeccde..0000000000 --- a/changelogs/fragments/8334-proxmox-action-group.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox* modules - there is now a ``community.general.proxmox`` module defaults group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334)." diff --git a/changelogs/fragments/8355-keycloak-idp-sanitize.yaml b/changelogs/fragments/8355-keycloak-idp-sanitize.yaml deleted file mode 100644 index 3a7942bb88..0000000000 --- a/changelogs/fragments/8355-keycloak-idp-sanitize.yaml +++ /dev/null @@ -1,2 +0,0 @@ -security_fixes: - - keycloak_identity_provider - the client secret was not correctly sanitized by the module. The return values ``proposed``, ``existing``, and ``end_state``, as well as the diff, did contain the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355). 
\ No newline at end of file diff --git a/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml b/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml deleted file mode 100644 index b62521ec9f..0000000000 --- a/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363). diff --git a/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml b/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml deleted file mode 100644 index e0a90be311..0000000000 --- a/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367)." diff --git a/changelogs/fragments/8373-honour-disable-logs.yaml b/changelogs/fragments/8373-honour-disable-logs.yaml deleted file mode 100644 index 112b10a9f4..0000000000 --- a/changelogs/fragments/8373-honour-disable-logs.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - opentelemetry callback plugin - honour the ``disable_logs`` option to avoid storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373). - diff --git a/changelogs/fragments/8379-verbose-mode-pkg5.yml b/changelogs/fragments/8379-verbose-mode-pkg5.yml deleted file mode 100644 index abc1c61dce..0000000000 --- a/changelogs/fragments/8379-verbose-mode-pkg5.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, https://github.com/ansible-collections/community.general/pull/8382). 
diff --git a/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml b/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml deleted file mode 100644 index b9c35cd0e4..0000000000 --- a/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - "gitlab modules - the basic auth method on GitLab API have been deprecated and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383)." diff --git a/changelogs/fragments/9.0.0.yml b/changelogs/fragments/9.0.0.yml deleted file mode 100644 index 8de366f74c..0000000000 --- a/changelogs/fragments/9.0.0.yml +++ /dev/null @@ -1 +0,0 @@ -release_summary: This is release 9.0.0 of `community.general`, released on 2024-05-20. diff --git a/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml b/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml deleted file mode 100644 index 2d5dc6205c..0000000000 --- a/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_sudorule - adds options to include denied commands or command groups (https://github.com/ansible-collections/community.general/pull/7415). diff --git a/changelogs/fragments/aix_filesystem-crfs-issue.yml b/changelogs/fragments/aix_filesystem-crfs-issue.yml deleted file mode 100644 index 6b3ddfb0d6..0000000000 --- a/changelogs/fragments/aix_filesystem-crfs-issue.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - aix_filesystem - fix issue with empty list items in crfs logic and option order (https://github.com/ansible-collections/community.general/pull/8052). 
diff --git a/changelogs/fragments/bitwarden-lookup-performance.yaml b/changelogs/fragments/bitwarden-lookup-performance.yaml deleted file mode 100644 index cb0405b1cb..0000000000 --- a/changelogs/fragments/bitwarden-lookup-performance.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - when looking for items using an item ID, the item is now accessed directly with ``bw get item`` instead of searching through all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468)." diff --git a/changelogs/fragments/hipchat.yml b/changelogs/fragments/hipchat.yml deleted file mode 100644 index 0260c09c84..0000000000 --- a/changelogs/fragments/hipchat.yml +++ /dev/null @@ -1,4 +0,0 @@ -deprecated_features: - - "hipchat callback plugin - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. - The callback plugin is therefore deprecated and will be removed from community.general 10.0.0 if nobody provides compelling reasons to still keep it - (https://github.com/ansible-collections/community.general/issues/8184, https://github.com/ansible-collections/community.general/pull/8189)." diff --git a/changelogs/fragments/internal-redirects.yml b/changelogs/fragments/internal-redirects.yml deleted file mode 100644 index 23ce456d4e..0000000000 --- a/changelogs/fragments/internal-redirects.yml +++ /dev/null @@ -1,5 +0,0 @@ -removed_features: - - "The deprecated redirects for internal module names have been removed. - These internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm`` that redirect to the short FQCN ``community.general.apt_rpm``. 
- They were originally needed to implement flatmapping; as various tooling started to recommend users to use the long names flatmapping was removed from the collection - and redirects were added for users who already followed these incorrect recommendations (https://github.com/ansible-collections/community.general/pull/7835)." diff --git a/changelogs/fragments/inventory-rce.yml b/changelogs/fragments/inventory-rce.yml deleted file mode 100644 index 9eee6dff52..0000000000 --- a/changelogs/fragments/inventory-rce.yml +++ /dev/null @@ -1,6 +0,0 @@ -security_fixes: - - "cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, proxmox, scaleway, stackpath_compute, virtualbox, - and xen_orchestra inventory plugin - make sure all data received from the remote servers is marked as unsafe, so remote - code execution by obtaining texts that can be evaluated as templates is not possible - (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, - https://github.com/ansible-collections/community.general/pull/8098)." diff --git a/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml b/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml deleted file mode 100644 index 03ac8ee01b..0000000000 --- a/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "lxd connection plugin - tighten the detection logic for lxd ``Instance not found`` errors, to avoid false detection on unrelated errors such as ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521)." 
diff --git a/changelogs/fragments/lxd-instances-api-endpoint-added.yml b/changelogs/fragments/lxd-instances-api-endpoint-added.yml deleted file mode 100644 index 3e7aa3b50e..0000000000 --- a/changelogs/fragments/lxd-instances-api-endpoint-added.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint (https://github.com/ansible-collections/community.general/pull/7980)." diff --git a/changelogs/fragments/pacemaker-cluster.yml b/changelogs/fragments/pacemaker-cluster.yml deleted file mode 100644 index 07e1ff3e04..0000000000 --- a/changelogs/fragments/pacemaker-cluster.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "pacemaker_cluster - actually implement check mode, which the module claims to support. This means that until now the module - also did changes in check mode (https://github.com/ansible-collections/community.general/pull/8081)." diff --git a/changelogs/fragments/pkgin.yml b/changelogs/fragments/pkgin.yml deleted file mode 100644 index 60eff0bfe5..0000000000 --- a/changelogs/fragments/pkgin.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971). diff --git a/changelogs/fragments/puppet_lang_force.yml b/changelogs/fragments/puppet_lang_force.yml deleted file mode 100644 index b826c8dba4..0000000000 --- a/changelogs/fragments/puppet_lang_force.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - puppet - add option ``environment_lang`` to set the environment language encoding. Defaults to lang ``C``. It is recommended - to set it to ``C.UTF-8`` or ``en_US.UTF-8`` depending on what is available on your system. 
(https://github.com/ansible-collections/community.general/issues/8000) diff --git a/changelogs/fragments/remove_deprecated.yml b/changelogs/fragments/remove_deprecated.yml deleted file mode 100644 index e777bf14e2..0000000000 --- a/changelogs/fragments/remove_deprecated.yml +++ /dev/null @@ -1,18 +0,0 @@ -removed_features: - - "rax* modules, rax module utils, rax docs fragment - the Rackspace modules relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "stackdriver - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "webfaction_* modules - these modules relied on HTTPS APIs that do not exist anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "flowdock - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` option was removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` have been removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198)." - - "proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198)." 
- - "django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, and ``validate`` were removed. Use ``clearsessions``, ``migrate``, and ``check`` instead, respectively (https://github.com/ansible-collections/community.general/pull/8198)." -deprecated_features: - - "django_manage - the ``ack_venv_creation_deprecation`` option has no more effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198)." -breaking_changes: - - "redfish_command, redfish_config, redfish_info - change the default for ``timeout`` from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198)." - - "cpanm - the default of the ``mode`` option changed from ``compatibility`` to ``new`` (https://github.com/ansible-collections/community.general/pull/8198)." - - "django_manage - the module will now fail if ``virtualenv`` is specified but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198)." - - "django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198)." diff --git a/galaxy.yml b/galaxy.yml index 397e104ca2..5c1688ce8f 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.0.0 +version: 9.1.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 95e509753e119a8bbf87efc9e58c30a7f978d06d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 20 May 2024 12:36:02 +0200 Subject: [PATCH 093/482] Update CI cron schedules. 
--- .azure-pipelines/azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 6f5a391c4c..7dc438ad3a 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -29,14 +29,14 @@ schedules: always: true branches: include: + - stable-9 - stable-8 - - stable-7 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-6 + - stable-7 variables: - name: checkoutPath From 4792e21416f681758df00ca85bf05abc6e56e03b Mon Sep 17 00:00:00 2001 From: kurokobo Date: Mon, 20 May 2024 22:40:05 +0900 Subject: [PATCH 094/482] docs: add seealso and notes for yaml callback plugin (#8396) * docs: add seealso and notes for yaml callback plugin * docs: correct links to parameters Co-authored-by: Felix Fontein * docs: shorten the long lines --------- Co-authored-by: Felix Fontein --- plugins/callback/yaml.py | 10 ++++++++++ tests/sanity/ignore-2.13.txt | 1 + tests/sanity/ignore-2.14.txt | 1 + 3 files changed, 12 insertions(+) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index ae2c8f8810..e41f69ec53 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -19,6 +19,16 @@ DOCUMENTATION = ''' - default_callback requirements: - set as stdout in configuration + seealso: + - plugin: ansible.builtin.default + plugin_type: callback + description: > + There is a parameter O(ansible.builtin.default#callback:result_format) in P(ansible.builtin.default#callback) + that allows you to change the output format to YAML. + notes: + - > + With ansible-core 2.13 or newer, you can instead specify V(yaml) for the parameter O(ansible.builtin.default#callback:result_format) + in P(ansible.builtin.default#callback). 
''' import yaml diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index cfeaff7c31..6f6495dd17 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,5 +1,6 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen plugins/callback/timestamp.py validate-modules:invalid-documentation +plugins/callback/yaml.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 247d43fe37..24d7521036 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,5 +1,6 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen plugins/callback/timestamp.py validate-modules:invalid-documentation +plugins/callback/yaml.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice From da2c87ce0d3546edc068137d0a1279605bc377a4 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 25 May 2024 07:59:00 +1200 Subject: [PATCH 095/482] fix test helper handling of rc (#8387) * fix test helper handling of rc * fix side_effect logic for rc != 0 * fix side_effect func + sanity tests * fix ignore files * fix code * revamp the generator for run_command calls returns in testcase * remove unused import * Update tests/sanity/ignore-2.18.txt Co-authored-by: Felix Fontein * Update tests/sanity/ignore-2.17.txt Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- tests/sanity/ignore-2.17.txt | 1 + tests/sanity/ignore-2.18.txt | 1 + tests/unit/plugins/modules/helper.py | 20 +++++++++++++------ 
.../plugins/modules/test_django_command.yaml | 2 -- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt index 7479d6bafe..806c4c5fcf 100644 --- a/tests/sanity/ignore-2.17.txt +++ b/tests/sanity/ignore-2.17.txt @@ -11,4 +11,5 @@ plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt index 7479d6bafe..806c4c5fcf 100644 --- a/tests/sanity/ignore-2.18.txt +++ b/tests/sanity/ignore-2.18.txt @@ -11,4 +11,5 @@ plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/helper.py index 1ffa19aad4..e012980afe 100644 --- a/tests/unit/plugins/modules/helper.py +++ b/tests/unit/plugins/modules/helper.py @@ -9,7 +9,6 @@ __metaclass__ = type import sys import json from collections import namedtuple -from itertools import chain, repeat import pytest import yaml @@ -76,12 +75,21 @@ class _RunCmdContext(_BaseContext): self.mock_run_cmd = 
self._make_mock_run_cmd() def _make_mock_run_cmd(self): - call_results = [(x.rc, x.out, x.err) for x in self.run_cmd_calls] - error_call_results = (123, - "OUT: testcase has not enough run_command calls", - "ERR: testcase has not enough run_command calls") + def _results(): + for result in [(x.rc, x.out, x.err) for x in self.run_cmd_calls]: + yield result + raise Exception("testcase has not enough run_command calls") + + results = _results() + + def side_effect(self_, **kwargs): + result = next(results) + if kwargs.get("check_rc", False) and result[0] != 0: + raise Exception("rc = {0}".format(result[0])) + return result + mock_run_command = self.mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', - side_effect=chain(call_results, repeat(error_call_results))) + side_effect=side_effect) return mock_run_command def check_results(self, results): diff --git a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml index 9fe9b419f9..046dd87f03 100644 --- a/tests/unit/plugins/modules/test_django_command.yaml +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -30,8 +30,6 @@ settings: whatever.settings output: failed: true - flags: - xfail: not seem to be failing as it should run_command_calls: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: *env-def From ec886203fc5d7805fe867188084664e607d503b5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 26 May 2024 23:56:49 +1200 Subject: [PATCH 096/482] multiple modules: use new vardict in module (#8411) * use new vardict in module * add changelog frag * Update changelogs/fragments/8411-locale-gen-vardict.yml Co-authored-by: Felix Fontein * set use_old_vardict to false in snap * set use_old_vardict to false in cpanm * set use_old_vardict to false in django mod helper * set use_old_vardict to false in gconftool2_info * set 
use_old_vardict to false in kernel_blacklist * set use_old_vardict to false in mksysb * set use_old_vardict to false in pipx_info * set use_old_vardict to false in snap_alias * update chglog frag * fix typo --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8411-locale-gen-vardict.yml | 11 +++++++++++ plugins/module_utils/django.py | 1 + plugins/module_utils/vardict.py | 2 +- plugins/modules/cpanm.py | 1 + plugins/modules/gconftool2_info.py | 1 + plugins/modules/hponcfg.py | 1 + plugins/modules/kernel_blacklist.py | 2 +- plugins/modules/locale_gen.py | 1 + plugins/modules/mksysb.py | 1 + plugins/modules/pipx_info.py | 1 + plugins/modules/snap.py | 5 +++-- plugins/modules/snap_alias.py | 1 + 12 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8411-locale-gen-vardict.yml diff --git a/changelogs/fragments/8411-locale-gen-vardict.yml b/changelogs/fragments/8411-locale-gen-vardict.yml new file mode 100644 index 0000000000..5220731281 --- /dev/null +++ b/changelogs/fragments/8411-locale-gen-vardict.yml @@ -0,0 +1,11 @@ +bugfixes: + - django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). 
+ - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). 
diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index b93dabbd2c..fbaf840db2 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -54,6 +54,7 @@ class DjangoModuleHelper(ModuleHelper): django_admin_cmd = None arg_formats = {} django_admin_arg_order = () + use_old_vardict = False def __init__(self): argument_spec = dict(django_std_args) diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py index cfcce4d4d2..51f802483d 100644 --- a/plugins/module_utils/vardict.py +++ b/plugins/module_utils/vardict.py @@ -100,7 +100,7 @@ class _Variable(object): return def __str__(self): - return "<_Variable: value={0!r}, initial={1!r}, diff={2}, output={3}, change={4}, verbosity={5}>".format( + return "".format( self.value, self.initial_value, self.diff, self.output, self.change, self.verbosity ) diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 302f995932..3beae895dc 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -170,6 +170,7 @@ class CPANMinus(ModuleHelper): installdeps=cmd_runner_fmt.as_bool("--installdeps"), pkg_spec=cmd_runner_fmt.as_list(), ) + use_old_vardict = False def __init_module__(self): v = self.vars diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index 282065b95e..f66e2da8f7 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -60,6 +60,7 @@ class GConftoolInfo(ModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py index 612a20d923..206565a235 100644 --- a/plugins/modules/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -98,6 +98,7 @@ class HPOnCfg(ModuleHelper): verbose=cmd_runner_fmt.as_bool("-v"), minfw=cmd_runner_fmt.as_opt_val("-m"), ) + use_old_vardict = False def __run__(self): runner = 
CmdRunner( diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py index 2a281440a7..224b5bba8c 100644 --- a/plugins/modules/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -67,7 +67,7 @@ class Blacklist(StateModuleHelper): ), supports_check_mode=True, ) - mute_vardict_deprecation = True + use_old_vardict = False def __init_module__(self): self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index 0dd76c9ab4..fe501e0239 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -79,6 +79,7 @@ class LocaleGen(StateModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.vars.set("ubuntu_mode", False) diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index 8272dbf7de..1280f04d59 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -138,6 +138,7 @@ class MkSysB(ModuleHelper): backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), ) + use_old_vardict = False def __init_module__(self): if not os.path.isdir(self.vars.storage_path): diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 34f9681b06..992ca79419 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -150,6 +150,7 @@ class PipXInfo(ModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): if self.vars.executable: diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py index fd16764802..16c3aec48b 100644 --- a/plugins/modules/snap.py +++ b/plugins/modules/snap.py @@ -194,6 +194,7 @@ class Snap(StateModuleHelper): }, supports_check_mode=True, ) + use_old_vardict = False @staticmethod def _first_non_zero(a): @@ -405,8 +406,8 @@ class Snap(StateModuleHelper): def 
state_present(self): - self.vars.meta('classic').set(output=True) - self.vars.meta('channel').set(output=True) + self.vars.set_meta('classic', output=True) + self.vars.set_meta('channel', output=True) actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH] if actionable_refresh: diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py index 54448c6f3a..ba54a9e155 100644 --- a/plugins/modules/snap_alias.py +++ b/plugins/modules/snap_alias.py @@ -105,6 +105,7 @@ class SnapAlias(StateModuleHelper): ], supports_check_mode=True, ) + use_old_vardict = False def _aliases(self): n = self.vars.name From e7ee90a9373a209d3fb239f32d53f152afbcdb70 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 26 May 2024 23:57:24 +1200 Subject: [PATCH 097/482] ansible_galaxy_install: minor refactor (#8413) * minor refactor * add changelog frag * remove commented code * set use_old_vardict to false --- changelogs/fragments/8413-galaxy-refactor.yml | 2 + plugins/modules/ansible_galaxy_install.py | 38 +++++++++---------- 2 files changed, 19 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/8413-galaxy-refactor.yml diff --git a/changelogs/fragments/8413-galaxy-refactor.yml b/changelogs/fragments/8413-galaxy-refactor.yml new file mode 100644 index 0000000000..edd1601be8 --- /dev/null +++ b/changelogs/fragments/8413-galaxy-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). 
diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index 1e2496daed..d382ed93a9 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -171,7 +171,7 @@ RETURN = """ import re -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException @@ -180,7 +180,9 @@ class AnsibleGalaxyInstall(ModuleHelper): _RE_LIST_PATH = re.compile(r'^# (?P.*)$') _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') - _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() + _RE_INSTALL_OUTPUT = re.compile( + r'^(?:(?P\w+\.\w+):(?P[\d\.]+)|- (?P\w+\.\w+) \((?P[\d\.]+)\)) was installed successfully$' + ) ansible_version = None output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') @@ -198,17 +200,18 @@ class AnsibleGalaxyInstall(ModuleHelper): required_if=[('type', 'both', ['requirements_file'])], supports_check_mode=False, ) + use_old_vardict = False command = 'ansible-galaxy' command_args_formats = dict( - type=fmt.as_func(lambda v: [] if v == 'both' else [v]), - galaxy_cmd=fmt.as_list(), - requirements_file=fmt.as_opt_val('-r'), - dest=fmt.as_opt_val('-p'), - force=fmt.as_bool("--force"), - no_deps=fmt.as_bool("--no-deps"), - version=fmt.as_bool("--version"), - name=fmt.as_list(), + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + 
version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), ) def _make_runner(self, lang): @@ -232,25 +235,18 @@ class AnsibleGalaxyInstall(ModuleHelper): try: runner = self._make_runner("C.UTF-8") with runner("version", check_rc=False, output_process=process) as ctx: - return runner, ctx.run(version=True) - except UnsupportedLocale as e: + return runner, ctx.run() + except UnsupportedLocale: runner = self._make_runner("en_US.UTF-8") with runner("version", check_rc=True, output_process=process) as ctx: - return runner, ctx.run(version=True) + return runner, ctx.run() def __init_module__(self): - # self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang) self.runner, self.ansible_version = self._get_ansible_galaxy_version() if self.ansible_version < (2, 11): self.module.fail_json( - msg="Support for Ansible 2.9 and ansible-base 2.10 has ben removed." + msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed." ) - # Collection install output changed: - # ansible-base 2.10: "coll.name (x.y.z)" - # ansible-core 2.11+: "coll.name:x.y.z" - self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' - r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' - r' was installed successfully$') self.vars.set("new_collections", {}, change=True) self.vars.set("new_roles", {}, change=True) if self.vars.type != "collection": From d96b2642bc25d5febaa136266b2f73ec0ea4bb6e Mon Sep 17 00:00:00 2001 From: Herschdorfer Date: Sun, 26 May 2024 13:59:05 +0200 Subject: [PATCH 098/482] Update lxd_container.py documentation. 
(#8309) remove dead link from debian example --- plugins/modules/lxd_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index b82e2be9b7..f44523a751 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -400,7 +400,7 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: https://images.linuxcontainers.org + server: [...] # URL to the image server alias: debian/11 timeout: 600 ''' From 43cb5a0d54856f1f09026e476a5b8ee03b44cf0b Mon Sep 17 00:00:00 2001 From: Strahinja Kustudic Date: Sun, 26 May 2024 21:07:09 +0200 Subject: [PATCH 099/482] Fix the homebrew module failing because of warnings (#8406) Instead of checking if there is an error message, which can also be a warning, we now check the return code. This commit fixes #8229 #7044 Co-authored-by: Strahinja Kustudic --- .../fragments/8406-fix-homebrew-cask-warning.yaml | 2 ++ plugins/modules/homebrew.py | 4 ++-- .../integration/targets/homebrew/tasks/docker.yml | 14 -------------- 3 files changed, 4 insertions(+), 16 deletions(-) create mode 100644 changelogs/fragments/8406-fix-homebrew-cask-warning.yaml diff --git a/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml b/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml new file mode 100644 index 0000000000..0e3bf38ed3 --- /dev/null +++ b/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044). 
diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 388682d924..2b60846b43 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -415,9 +415,9 @@ class Homebrew(object): if self.force_formula: cmd.append("--formula") rc, out, err = self.module.run_command(cmd) - if err: + if rc != 0: self.failed = True - self.message = err.strip() + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) raise HomebrewException(self.message) data = json.loads(out) diff --git a/tests/integration/targets/homebrew/tasks/docker.yml b/tests/integration/targets/homebrew/tasks/docker.yml index 3b9e2ea6b4..c7f282ba2d 100644 --- a/tests/integration/targets/homebrew/tasks/docker.yml +++ b/tests/integration/targets/homebrew/tasks/docker.yml @@ -12,20 +12,6 @@ path: "{{ brew_which.stdout }}" register: brew_stat -- name: MACOS | Install docker without --formula - community.general.homebrew: - name: docker - state: present - become: true - become_user: "{{ brew_stat.stat.pw_name }}" - ignore_errors: true - register: result - -- name: Check that installing docker without --formula raises warning - assert: - that: - - result is failed - - name: MACOS | Install docker community.general.homebrew: name: docker From 572caeaa39ad3efd6083afafba9fd4fe4c88f9fd Mon Sep 17 00:00:00 2001 From: Thomas Bach <63091663+thomasbach-dev@users.noreply.github.com> Date: Sun, 26 May 2024 21:07:48 +0200 Subject: [PATCH 100/482] keycloak_client: avoid TypeError if `result["attributes"]` is a list (#8403) * fix(keycloak_client): avoid TypeError if attributes is a list As sanitize_cr might be executed after normalise_cr, result['attributes'] can be of type list and we run into: TypeError: list indices must be integers or slices, not str * Update changelog fragment. 
--------- Co-authored-by: Felix Fontein --- .../fragments/8403-fix-typeerror-in-keycloak-client.yaml | 2 ++ plugins/modules/keycloak_client.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml diff --git a/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml b/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml new file mode 100644 index 0000000000..b8acf7b09b --- /dev/null +++ b/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index cd9c60bacf..3628e5a517 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -775,8 +775,9 @@ def sanitize_cr(clientrep): if 'secret' in result: result['secret'] = 'no_log' if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' + attributes = result['attributes'] + if isinstance(attributes, dict) and 'saml.signing.private.key' in attributes: + attributes['saml.signing.private.key'] = 'no_log' return normalise_cr(result) From e690317e3ac030d76edafb3801a164b5b1ae95a6 Mon Sep 17 00:00:00 2001 From: Allen Smith Date: Sat, 1 Jun 2024 13:33:29 -0600 Subject: [PATCH 101/482] Add partial diff support, not in check mode to openbsd_pkg (#8402) * Add partial diff support, not in check mode * Add changelog fragment * Fix PEP8. Want to run Black against this so badly. 
* Update changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml Co-authored-by: Felix Fontein * Update plugins/modules/openbsd_pkg.py Co-authored-by: Felix Fontein * Update plugins/modules/openbsd_pkg.py Co-authored-by: Felix Fontein * Remove unneeded comment --------- Co-authored-by: Allen Smith Co-authored-by: Felix Fontein --- .../8402-add-diif-mode-openbsd-pkg.yml | 2 ++ plugins/modules/openbsd_pkg.py | 26 ++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml diff --git a/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml b/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml new file mode 100644 index 0000000000..2a4e7dfd8d --- /dev/null +++ b/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml @@ -0,0 +1,2 @@ +minor_changes: + - openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402). diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py index c831136110..69ac7bff8e 100644 --- a/plugins/modules/openbsd_pkg.py +++ b/plugins/modules/openbsd_pkg.py @@ -24,7 +24,10 @@ attributes: check_mode: support: full diff_mode: - support: none + support: partial + version_added: 9.1.0 + details: + - Only works when check mode is not enabled. options: name: description: @@ -159,6 +162,20 @@ def execute_command(cmd, module): return module.run_command(cmd_args, environ_update={'TERM': 'dumb'}) +def get_all_installed(module): + """ + Get all installed packaged. Used to support diff mode + """ + command = 'pkg_info -Iq' + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + module.fail_json(msg="failed in get_all_installed(): %s" % stderr) + + return stdout + + # Function used to find out if a package is currently installed. 
def get_package_state(names, pkg_spec, module): info_cmd = 'pkg_info -Iq' @@ -573,10 +590,13 @@ def main(): result['name'] = name result['state'] = state result['build'] = build + result['diff'] = {} # The data structure used to keep track of package information. pkg_spec = {} + new_package_list = original_package_list = get_all_installed(module) + if build is True: if not os.path.isdir(ports_dir): module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) @@ -661,6 +681,10 @@ def main(): result['changed'] = combined_changed + if result['changed'] and not module.check_mode: + new_package_list = get_all_installed(module) + result['diff'] = dict(before=original_package_list, after=new_package_list) + module.exit_json(**result) From 7d72300c3648cb69b571c202ff71bd280e6e4b0f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 2 Jun 2024 08:44:50 +1200 Subject: [PATCH 102/482] add cmd_runner_fmt.stack decorator (#8415) * add cmd_runner_fmt.stack decorator * fix sanity * fix typo * add changelog frag --- .../fragments/8415-cmd-runner-stack.yml | 2 ++ plugins/module_utils/cmd_runner.py | 25 ++++++++++++++++++- .../plugins/module_utils/test_cmd_runner.py | 5 +++- 3 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8415-cmd-runner-stack.yml diff --git a/changelogs/fragments/8415-cmd-runner-stack.yml b/changelogs/fragments/8415-cmd-runner-stack.yml new file mode 100644 index 0000000000..555683e057 --- /dev/null +++ b/changelogs/fragments/8415-cmd-runner-stack.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 2bf2b32e8c..aab654f76f 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -94,13 +94,23 @@ class _ArgFormat(object): self.ignore_none = ignore_none self.ignore_missing_value = ignore_missing_value - def __call__(self, value, ctx_ignore_none): + def __call__(self, value, ctx_ignore_none=True): ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none if value is None and ignore_none: return [] f = self.func return [str(x) for x in f(value)] + def __str__(self): + return "".format( + self.func, + self.ignore_none, + self.ignore_missing_value, + ) + + def __repr__(self): + return str(self) + class _Format(object): @staticmethod @@ -184,6 +194,19 @@ class _Format(object): return func(**v) return wrapper + @staticmethod + def stack(fmt): + @wraps(fmt) + def wrapper(*args, **kwargs): + new_func = fmt(ignore_none=True, *args, **kwargs) + + def stacking(value): + stack = [new_func(v) for v in value if v] + stack = [x for args in stack for x in args] + return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper + class CmdRunner(object): """ diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index 6816afb34c..fcdffe7d2c 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -47,6 +47,9 @@ TC_FORMATS = dict( simple_fixed_false=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), False, ["--always-here", "--forever"], None), simple_fixed_none=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), None, ["--always-here", "--forever"], None), simple_fixed_str=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), "something", ["--always-here", "--forever"], None), + 
stack_optval__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_optval), "-t"), ["potatoes", "bananas"], ["-tpotatoes", "-tbananas"], None), + stack_opt_val__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val), "-t"), ["potatoes", "bananas"], ["-t", "potatoes", "-t", "bananas"], None), + stack_opt_eq_val__int=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_eq_val), "--answer"), [42, 17], ["--answer=42", "--answer=17"], None), ) if tuple(version_info) >= (3, 1): from collections import OrderedDict @@ -67,7 +70,7 @@ TC_FORMATS_IDS = sorted(TC_FORMATS.keys()) def test_arg_format(func, value, expected, exception): fmt_func = func() try: - actual = fmt_func(value, ctx_ignore_none=True) + actual = fmt_func(value) print("formatted string = {0}".format(actual)) assert actual == expected, "actual = {0}".format(actual) except Exception as e: From 54df0c9b3a210c423eb84ddf25c9e8b53caa1316 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 2 Jun 2024 08:45:17 +1200 Subject: [PATCH 103/482] django_createcachetable: new module (#8412) * django_createcachetabe: new module * add --noinput arg to testcase * add module to BOTMETA * rename module class name * fix examples documentation * remove unused config * adjust version_added --- .github/BOTMETA.yml | 2 + plugins/doc_fragments/django.py | 9 +++ plugins/module_utils/django.py | 39 +++++++++-- plugins/modules/django_createcachetable.py | 67 +++++++++++++++++++ .../modules/test_django_createcachetable.py | 13 ++++ .../modules/test_django_createcachetable.yaml | 15 +++++ 6 files changed, 141 insertions(+), 4 deletions(-) create mode 100644 plugins/modules/django_createcachetable.py create mode 100644 tests/unit/plugins/modules/test_django_createcachetable.py create mode 100644 tests/unit/plugins/modules/test_django_createcachetable.yaml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index add3249355..ef10a32e0f 100644 --- a/.github/BOTMETA.yml +++ 
b/.github/BOTMETA.yml @@ -506,6 +506,8 @@ files: ignore: scottanderson42 tastychutney labels: django_manage maintainers: russoz + $modules/django_createcachetable.py: + maintainers: russoz $modules/django_command.py: maintainers: russoz $modules/dnf_versionlock.py: diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py index d92799937d..f89ec91448 100644 --- a/plugins/doc_fragments/django.py +++ b/plugins/doc_fragments/django.py @@ -51,3 +51,12 @@ seealso: Please make sure that you select the right version of Django in the version selector on that page. link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ ''' + + DATABASE = r''' +options: + database: + description: + - Specify the database to be used. + type: str + default: default +''' diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index fbaf840db2..5fb375c6fd 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -7,6 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type +from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -33,6 +34,18 @@ _django_std_arg_fmts = dict( skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), ) +_django_database_args = dict( + database=dict(type="str", default="default"), +) + +_args_menu = dict( + std=(django_std_args, _django_std_arg_fmts), + database=(_django_database_args, {"database": cmd_runner_fmt.as_opt_eq_val("--database")}), + noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), +) + class 
_DjangoRunner(PythonRunner): def __init__(self, module, arg_formats=None, **kwargs): @@ -55,15 +68,30 @@ class DjangoModuleHelper(ModuleHelper): arg_formats = {} django_admin_arg_order = () use_old_vardict = False + _django_args = [] + _check_mode_arg = "" def __init__(self): - argument_spec = dict(django_std_args) - argument_spec.update(self.module.get("argument_spec", {})) - self.module["argument_spec"] = argument_spec + self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), + self.arg_formats, + *(["std"] + self._django_args)) super(DjangoModuleHelper, self).__init__(self.module) if self.django_admin_cmd is not None: self.vars.command = self.django_admin_cmd + @staticmethod + def _build_args(arg_spec, arg_format, *names): + res_arg_spec = {} + res_arg_fmts = {} + for name in names: + args, fmts = _args_menu[name] + res_arg_spec = dict_merge(res_arg_spec, args) + res_arg_fmts = dict_merge(res_arg_fmts, fmts) + res_arg_spec = dict_merge(res_arg_spec, arg_spec) + res_arg_fmts = dict_merge(res_arg_fmts, arg_format) + + return res_arg_spec, res_arg_fmts + def __run__(self): runner = _DjangoRunner(self.module, default_args_order=self.django_admin_arg_order, @@ -71,7 +99,10 @@ class DjangoModuleHelper(ModuleHelper): venv=self.vars.venv, check_rc=True) with runner() as ctx: - results = ctx.run() + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + results = ctx.run(**run_params) self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py new file mode 100644 index 0000000000..b038e0358f --- /dev/null +++ b/plugins/modules/django_createcachetable.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt 
or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: django_createcachetable +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin createcachetable) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin createcachetable). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = """ +- name: Create cache table in the default database + community.general.django_createcachetable: + settings: myproject.settings + +- name: Create cache table in the other database + community.general.django_createcachetable: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = """ +run_info: + description: Command-line execution information. 
+ type: dict + returned: success and O(verbosity) >= 3 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCreateCacheTable(DjangoModuleHelper): + module = dict( + supports_check_mode=True, + ) + django_admin_cmd = "createcachetable" + django_admin_arg_order = "noinput database dry_run" + _django_args = ["noinput", "database", "dry_run"] + _check_mode_arg = "dry_run" + + +def main(): + DjangoCreateCacheTable.execute() + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_django_createcachetable.py b/tests/unit/plugins/modules/test_django_createcachetable.py new file mode 100644 index 0000000000..5a4b89c0c1 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_createcachetable.py @@ -0,0 +1,13 @@ +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import django_createcachetable +from .helper import Helper + + +Helper.from_module(django_createcachetable, __name__) diff --git a/tests/unit/plugins/modules/test_django_createcachetable.yaml b/tests/unit/plugins/modules/test_django_createcachetable.yaml new file mode 100644 index 0000000000..1808b163fb --- /dev/null +++ b/tests/unit/plugins/modules/test_django_createcachetable.yaml @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: command_success + input: + settings: whatever.settings + run_command_calls: + - command: [/testbin/python, -m, django, createcachetable, --no-color, 
--settings=whatever.settings, --noinput, --database=default] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "whatever\n" + err: "" From 58ce19d2c2ddd000dfef999148e0990fdea6fc87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20GATELLIER?= <26511053+lgatellier@users.noreply.github.com> Date: Sat, 1 Jun 2024 22:45:51 +0200 Subject: [PATCH 104/482] feat(gitlab modules): remove basic auth (#8405) BREAKING CHANGE : Remove basic auth against GitLab API --- .../8405-gitlab-remove-basic-auth.yml | 2 ++ plugins/module_utils/gitlab.py | 31 ++++++------------- 2 files changed, 11 insertions(+), 22 deletions(-) create mode 100644 changelogs/fragments/8405-gitlab-remove-basic-auth.yml diff --git a/changelogs/fragments/8405-gitlab-remove-basic-auth.yml b/changelogs/fragments/8405-gitlab-remove-basic-auth.yml new file mode 100644 index 0000000000..f8a03a3d71 --- /dev/null +++ b/changelogs/fragments/8405-gitlab-remove-basic-auth.yml @@ -0,0 +1,2 @@ +removed_features: + - gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405). 
diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index 224789a71e..3c0014cfe9 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -111,29 +111,16 @@ def gitlab_authentication(module, min_version=None): verify = ca_path if validate_certs and ca_path else validate_certs try: - # python-gitlab library remove support for username/password authentication since 1.13.0 - # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0 - # This condition allow to still support older version of the python-gitlab library - if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"): - module.deprecate( - "GitLab basic auth is deprecated and will be removed in next major version, " - "using another auth method (API token or OAuth) is strongly recommended.", - version='10.0.0', - collection_name='community.general') - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, email=gitlab_user, password=gitlab_password, - private_token=gitlab_token, api_version=4) - else: - # We can create an oauth_token using a username and password - # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow - if gitlab_user: - data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} - resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) - resp_data = resp.json() - gitlab_oauth_token = resp_data["access_token"] - - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, - oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) + # We can create an oauth_token using a username and password + # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow + if gitlab_user: + data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} + resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) + resp_data = resp.json() + 
gitlab_oauth_token = resp_data["access_token"] + gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, + oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e)) From 5a5188a45338ba7e5631d47f43102ab08c77d874 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 2 Jun 2024 10:16:53 +1200 Subject: [PATCH 105/482] deps module_utils: add docs (#8417) * add docs for the deps module utils * wordsmithing * fix reference and filename * add entries to BOTMETA.yml * Update docs/docsite/rst/moddev_guide_deps.rst Co-authored-by: Felix Fontein * adjust docs organisation * adjust docs organisation II * PR adjustments --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + docs/docsite/extra-docs.yml | 3 ++ docs/docsite/rst/guide_deps.rst | 74 +++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+) create mode 100644 docs/docsite/rst/guide_deps.rst diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ef10a32e0f..7f98718772 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1456,6 +1456,8 @@ files: maintainers: baldwinSPC nurfet-becirevic t0mk teebes docs/docsite/rst/guide_scaleway.rst: maintainers: $team_scaleway + docs/docsite/rst/guide_deps.rst: + maintainers: russoz docs/docsite/rst/test_guide.rst: maintainers: felixfontein ######################### diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 529573606c..6b1d53ccd5 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -14,3 +14,6 @@ sections: - guide_online - guide_packet - guide_scaleway + - title: Developer Guides + toctree: + - guide_deps diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst new file mode 
100644 index 0000000000..4c0c4687a4 --- /dev/null +++ b/docs/docsite/rst/guide_deps.rst @@ -0,0 +1,74 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_deps: + +``deps`` Guide +============== + + +Using ``deps`` +^^^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies +the importing of code as described in :ref:`Importing and using shared code `. +Please notice that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins. + +The same example from the Developer Guide would become: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils import deps + + with deps.declare("foo"): + import foo + +Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do + +.. code-block:: python + + deps.validate(module) # assuming module is a valid AnsibleModule instance + +By default, ``deps`` will rely on ``ansible.module_utils.basic.missing_required_lib`` to generate +a message about a failing import. That function accepts parameters ``reason`` and ``url``, and +and so does ``deps```: + +.. code-block:: python + + with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"): + import foo + +If you would rather write a custom message instead of using ``missing_required_lib`` then do: + +.. code-block:: python + + with deps.declare("foo", msg="Custom msg explaining why foo is needed"): + import foo + +``deps`` allows for multiple dependencies to be declared: + +.. 
code-block:: python + + with deps.declare("foo"): + import foo + + with deps.declare("bar"): + import bar + + with deps.declare("doe"): + import doe + +By default, ``deps.validate()`` will check on all the declared dependencies, but if so desired, +they can be validated selectively by doing: + +.. code-block:: python + + deps.validate(module, "foo") # only validates the "foo" dependency + + deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies + + deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar" + +.. versionadded:: 6.1.0 From d46e12e2809f939dc433fa34335e31c6999d6750 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 2 Jun 2024 10:17:26 +1200 Subject: [PATCH 106/482] ansible_galaxy_install: add upgrade feature (#8431) * add upgrade feature * add changelog frag * Update plugins/modules/ansible_galaxy_install.py * Update plugins/modules/ansible_galaxy_install.py --- changelogs/fragments/8431-galaxy-upgrade.yml | 2 + plugins/modules/ansible_galaxy_install.py | 27 +++++++-- .../ansible_galaxy_install/tasks/main.yml | 55 +++++++++++++++++++ 3 files changed, 78 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8431-galaxy-upgrade.yml diff --git a/changelogs/fragments/8431-galaxy-upgrade.yml b/changelogs/fragments/8431-galaxy-upgrade.yml new file mode 100644 index 0000000000..9be9ca93c8 --- /dev/null +++ b/changelogs/fragments/8431-galaxy-upgrade.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351). 
diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index d382ed93a9..b0f3aeb5da 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -32,6 +32,19 @@ attributes: diff_mode: support: none options: + state: + description: + - > + If O(state=present) then the collection or role will be installed. + Note that the collections and roles are not updated with this option. + - > + Currently the O(state=latest) is ignored unless O(type=collection), and it will + ensure the collection is installed and updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + type: str + choices: [ present, latest ] + default: present + version_added: 9.1.0 type: description: - The type of installation performed by C(ansible-galaxy). @@ -69,7 +82,8 @@ options: default: false force: description: - - Force overwriting an existing role or collection. + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output will always report C(changed=true). - Using O(force=true) is mandatory when downgrading. 
type: bool default: false @@ -188,6 +202,7 @@ class AnsibleGalaxyInstall(ModuleHelper): output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') module = dict( argument_spec=dict( + state=dict(type='str', choices=['present', 'latest'], default='present'), type=dict(type='str', choices=('collection', 'role', 'both'), required=True), name=dict(type='str'), requirements_file=dict(type='path'), @@ -206,6 +221,7 @@ class AnsibleGalaxyInstall(ModuleHelper): command_args_formats = dict( type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), requirements_file=cmd_runner_fmt.as_opt_val('-r'), dest=cmd_runner_fmt.as_opt_val('-p'), force=cmd_runner_fmt.as_bool("--force"), @@ -244,9 +260,7 @@ class AnsibleGalaxyInstall(ModuleHelper): def __init_module__(self): self.runner, self.ansible_version = self._get_ansible_galaxy_version() if self.ansible_version < (2, 11): - self.module.fail_json( - msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed." 
- ) + self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.") self.vars.set("new_collections", {}, change=True) self.vars.set("new_roles", {}, change=True) if self.vars.type != "collection": @@ -299,8 +313,9 @@ class AnsibleGalaxyInstall(ModuleHelper): elif match.group("role"): self.vars.new_roles[match.group("role")] = match.group("rversion") - with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx: - ctx.run(galaxy_cmd="install") + upgrade = (self.vars.type == "collection" and self.vars.state == "latest") + with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) if self.verbosity > 2: self.vars.set("run_info", ctx.run_info) diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml index 1ecd9980d4..5c4af6d167 100644 --- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -4,10 +4,16 @@ # SPDX-License-Identifier: GPL-3.0-or-later ################################################### +- name: Make directory install_c + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/install_c" + state: directory + - name: Install collection netbox.netbox community.general.ansible_galaxy_install: type: collection name: netbox.netbox + dest: "{{ remote_tmp_dir }}/install_c" register: install_c0 - name: Assert collection netbox.netbox was installed @@ -20,6 +26,7 @@ community.general.ansible_galaxy_install: type: collection name: netbox.netbox + dest: "{{ remote_tmp_dir }}/install_c" register: install_c1 - name: Assert collection was not installed @@ -28,10 +35,16 @@ - install_c1 is not changed ################################################### +- name: Make directory install_r + ansible.builtin.file: + path: "{{ 
remote_tmp_dir }}/install_r" + state: directory + - name: Install role ansistrano.deploy community.general.ansible_galaxy_install: type: role name: ansistrano.deploy + dest: "{{ remote_tmp_dir }}/install_r" register: install_r0 - name: Assert collection ansistrano.deploy was installed @@ -44,6 +57,7 @@ community.general.ansible_galaxy_install: type: role name: ansistrano.deploy + dest: "{{ remote_tmp_dir }}/install_r" register: install_r1 - name: Assert role was not installed @@ -86,3 +100,44 @@ assert: that: - install_rq1 is not changed + +################################################### +- name: Make directory upgrade_c + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/upgrade_c" + state: directory + +- name: Install collection netbox.netbox 3.17.0 + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox:3.17.0 + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c0 + +- name: Assert collection netbox.netbox was installed + assert: + that: + - upgrade_c0 is changed + - '"netbox.netbox" in upgrade_c0.new_collections' + +- name: Upgrade collection netbox.netbox + community.general.ansible_galaxy_install: + state: latest + type: collection + name: netbox.netbox + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c1 + +- name: Upgrade collection netbox.netbox (again) + community.general.ansible_galaxy_install: + state: latest + type: collection + name: netbox.netbox + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c2 + +- name: Assert collection was not installed + assert: + that: + - upgrade_c1 is changed + - upgrade_c2 is not changed From 961767e2dd14f09975d6ab2498117f83cc5acfbd Mon Sep 17 00:00:00 2001 From: Mike Raineri Date: Mon, 3 Jun 2024 00:49:40 -0400 Subject: [PATCH 107/482] Redfish: Add options to check the availability of the service (#8434) * Redfish: Add options to check the availability of the service Signed-off-by: Mike Raineri * Updates based on review feedback Signed-off-by: Mike 
Raineri * Updated comment to reflect changed behavior Signed-off-by: Mike Raineri * Added changelog fragments Signed-off-by: Mike Raineri * Update changelogs/fragments/8051-Redfish-Wait-For-Service.yml Co-authored-by: Felix Fontein * Update plugins/modules/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/redfish_command.py Co-authored-by: Felix Fontein --------- Signed-off-by: Mike Raineri Co-authored-by: Felix Fontein --- .../8051-Redfish-Wait-For-Service.yml | 3 + plugins/module_utils/redfish_utils.py | 56 +++++++++++++++++-- plugins/modules/redfish_command.py | 30 +++++++++- plugins/modules/redfish_info.py | 22 +++++++- 4 files changed, 102 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/8051-Redfish-Wait-For-Service.yml diff --git a/changelogs/fragments/8051-Redfish-Wait-For-Service.yml b/changelogs/fragments/8051-Redfish-Wait-For-Service.yml new file mode 100644 index 0000000000..826c40e8af --- /dev/null +++ b/changelogs/fragments/8051-Redfish-Wait-For-Service.yml @@ -0,0 +1,3 @@ +minor_changes: + - redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). + - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 6935573d0b..139628bd9f 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -11,6 +11,7 @@ import os import random import string import gzip +import time from io import BytesIO from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native @@ -132,11 +133,13 @@ class RedfishUtils(object): return resp # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri, override_headers=None, allow_no_resp=False): + def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) if override_headers: req_headers.update(override_headers) username, password, basic_auth = self._auth_params(req_headers) + if timeout is None: + timeout = self.timeout try: # Service root is an unauthenticated resource; remove credentials # in case the caller will be using sessions later. @@ -146,7 +149,7 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=timeout) headers = dict((k.lower(), v) for (k, v) in resp.info().items()) try: if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'): @@ -624,6 +627,24 @@ class RedfishUtils(object): allowable_values = default_values return allowable_values + def check_service_availability(self): + """ + Checks if the service is accessible. + + :return: dict containing the status of the service + """ + + # Get the service root + # Override the timeout since the service root is expected to be readily + # available. 
+ service_root = self.get_request(self.root_uri + self.service_root, timeout=10) + if service_root['ret'] is False: + # Failed, either due to a timeout or HTTP error; not available + return {'ret': True, 'available': False} + + # Successfully accessed the service root; available + return {'ret': True, 'available': True} + def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] @@ -1083,11 +1104,12 @@ class RedfishUtils(object): return self.manage_power(command, self.systems_uri, '#ComputerSystem.Reset') - def manage_manager_power(self, command): + def manage_manager_power(self, command, wait=False, wait_timeout=120): return self.manage_power(command, self.manager_uri, - '#Manager.Reset') + '#Manager.Reset', wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name): + def manage_power(self, command, resource_uri, action_name, wait=False, + wait_timeout=120): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', @@ -1147,6 +1169,30 @@ class RedfishUtils(object): response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response + + # If requested to wait for the service to be available again, block + # until it's ready + if wait: + elapsed_time = 0 + start_time = time.time() + # Start with a large enough sleep. Some services will process new + # requests while in the middle of shutting down, thus breaking out + # early. + time.sleep(30) + + # Periodically check for the service's availability. 
+ while elapsed_time <= wait_timeout: + status = self.check_service_availability() + if status['available']: + # It's available; we're done + break + time.sleep(5) + elapsed_time = time.time() - start_time + + if elapsed_time > wait_timeout: + # Exhausted the wait timer; error + return {'ret': False, 'changed': True, + 'msg': 'The service did not become available after %d seconds' % wait_timeout} return {'ret': True, 'changed': True} def manager_reset_to_defaults(self, command): diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index d351e7c1d8..0f7a64b81f 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -288,6 +288,20 @@ options: type: str choices: [ ResetAll, PreserveNetworkAndUsers, PreserveNetwork ] version_added: 8.6.0 + wait: + required: false + description: + - Block until the service is ready again. + type: bool + default: false + version_added: 9.1.0 + wait_timeout: + required: false + description: + - How long to block until the service is ready again before giving up. 
+ type: int + default: 120 + version_added: 9.1.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -685,6 +699,16 @@ EXAMPLES = ''' username: "{{ username }}" password: "{{ password }}" + - name: Restart manager power gracefully and wait for it to be available + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + wait: True + - name: Restart manager power gracefully community.general.redfish_command: category: Manager @@ -841,7 +865,9 @@ def main(): ), strip_etag_quotes=dict(type='bool', default=False), reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), - bios_attributes=dict(type="dict") + bios_attributes=dict(type="dict"), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), ), required_together=[ ('username', 'password'), @@ -1016,7 +1042,7 @@ def main(): command = 'PowerGracefulRestart' if command.startswith('Power'): - result = rf_utils.manage_manager_power(command) + result = rf_utils.manage_manager_power(command, module.params['wait'], module.params['wait_timeout']) elif command == 'ClearLogs': result = rf_utils.clear_logs() elif command == 'VirtualMediaInsert': diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index 3b594b7a2c..efcb34f016 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -359,6 +359,16 @@ EXAMPLES = ''' baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" + + - name: Check the availability of the service with a timeout of 5 seconds + community.general.redfish_info: + category: Service + command: CheckAvailability + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 5 + register: result ''' RETURN = ''' @@ -385,6 +395,7 @@ CATEGORY_COMMANDS_ALL = { "GetUpdateStatus"], "Manager": 
["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"], + "Service": ["CheckAvailability"], } CATEGORY_COMMANDS_DEFAULT = { @@ -393,7 +404,8 @@ CATEGORY_COMMANDS_DEFAULT = { "Accounts": "ListUsers", "Update": "GetFirmwareInventory", "Sessions": "GetSessions", - "Manager": "GetManagerNicInventory" + "Manager": "GetManagerNicInventory", + "Service": "CheckAvailability", } @@ -473,7 +485,13 @@ def main(): module.fail_json(msg="Invalid Category: %s" % category) # Organize by Categories / Commands - if category == "Systems": + if category == "Service": + # service-level commands are always available + for command in command_list: + if command == "CheckAvailability": + result["service"] = rf_utils.check_service_availability() + + elif category == "Systems": # execute only if we find a Systems resource resource = rf_utils._find_systems_resource() if resource['ret'] is False: From 5041ebe5b2cf4558496739c79d301c273e7d2ba7 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 3 Jun 2024 06:50:55 +0200 Subject: [PATCH 108/482] fix(opentelemetry): remove request from the logs (#8430) * fix(opentelemetry): remove request from the logs * add changelog * filter by task * add new bugfix * rename * support legacy and shortcat ansible tasks * Update plugins/callback/opentelemetry.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...hen-using-logs-with-uri-or-slurp-tasks.yaml | 3 +++ plugins/callback/opentelemetry.py | 18 +++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml diff --git a/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml b/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml new file mode 100644 index 0000000000..29da61c8bf --- 
/dev/null +++ b/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml @@ -0,0 +1,3 @@ +bugfixes: + - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430). + - opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). \ No newline at end of file diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 58cfa057b7..c6e8a87c16 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -556,11 +556,19 @@ class CallbackModule(CallbackBase): self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') - def dump_results(self, result): + def dump_results(self, task, result): """ dump the results if disable_logs is not enabled """ if self.disable_logs: return "" - return self._dump_results(result._result) + # ansible.builtin.uri contains the response in the json field + save = dict(result._result) + + if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"): + save.pop("json") + # ansible.builtin.slurp contains the response in the content field + if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"): + save.pop("content") + return self._dump_results(save) def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -611,7 +619,7 @@ class CallbackModule(CallbackBase): self.tasks_data, status, result, - self.dump_results(result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): @@ -619,7 +627,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'ok', result, - self.dump_results(result) + self.dump_results(self.tasks_data[result._task._uuid], 
result) ) def v2_runner_on_skipped(self, result): @@ -627,7 +635,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'skipped', result, - self.dump_results(result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_playbook_on_include(self, included_file): From 6f8f12f762365686e18ce4a2bb0847b1c8b79cc7 Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Tue, 4 Jun 2024 06:01:25 +0200 Subject: [PATCH 109/482] Feature filter keep_keys (#8456) * Add filter keep_keys. Implement feature request #8438 * Fix comment indentation. * Fix regex reference. * Fix indentation. * Fix isinstance list. * Update plugins/plugin_utils/keys_filter.py Co-authored-by: Felix Fontein * Update plugins/plugin_utils/keys_filter.py Co-authored-by: Felix Fontein * Update plugins/plugin_utils/keys_filter.py Co-authored-by: Felix Fontein * Update plugins/plugin_utils/keys_filter.py Co-authored-by: Felix Fontein * Update plugins/filter/keep_keys.py Co-authored-by: Felix Fontein * Update documentation, examples, and integration tests. * _keys_filter_target_str returns tuple of unique target strings if target is list. Update documentation, function comments, and error messages. * Sort maintainers. * Update plugins/filter/keep_keys.py Co-authored-by: Felix Fontein * Update examples with explicit collection. 
--------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 4 + plugins/filter/keep_keys.py | 138 ++++++++++++++++++ plugins/plugin_utils/keys_filter.py | 108 ++++++++++++++ .../targets/filter_keep_keys/aliases | 5 + .../filter_keep_keys/tasks/keep_keys.yml | 79 ++++++++++ .../targets/filter_keep_keys/tasks/main.yml | 7 + .../targets/filter_keep_keys/vars/main.yml | 33 +++++ 7 files changed, 374 insertions(+) create mode 100644 plugins/filter/keep_keys.py create mode 100644 plugins/plugin_utils/keys_filter.py create mode 100644 tests/integration/targets/filter_keep_keys/aliases create mode 100644 tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml create mode 100644 tests/integration/targets/filter_keep_keys/tasks/main.yml create mode 100644 tests/integration/targets/filter_keep_keys/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 7f98718772..ef1b879de3 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -157,6 +157,8 @@ files: $filters/jc.py: maintainers: kellyjonbrazil $filters/json_query.py: {} + $filters/keep_keys.py: + maintainers: vbotka $filters/lists.py: maintainers: cfiehe $filters/lists_difference.yml: @@ -1417,6 +1419,8 @@ files: ignore: matze labels: zypper maintainers: $team_suse + $plugin_utils/keys_filter.py: + maintainers: vbotka $plugin_utils/unsafe.py: maintainers: felixfontein $tests/a_module.py: diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py new file mode 100644 index 0000000000..009e986ab2 --- /dev/null +++ b/plugins/filter/keep_keys.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: keep_keys + short_description: Keep 
specific keys from dictionaries in a list + version_added: "9.1.0" + author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) + description: This filter keeps only specified keys from a provided list of dictionaries. + options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to keep, or a list of keys or keys patterns to keep. + - If O(matching_parameter=regex) there must be exactly one pattern provided. + type: raw + required: true + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target) items. + starts_with: Matches keys that start with one of the O(target) items. + ends_with: Matches keys that end with one of the O(target) items. + regex: + - Matches keys that match the regular expresion provided in O(target). + - In this case, O(target) must be a regex string or a list with single regex string. +''' + +EXAMPLES = ''' + l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. + t: [k0_x0, k1_x1] + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. + t: [k0, k1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. + t: [x0, x1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. + t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. 
+ t: '^.*[01]_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. + r: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + # 6) By default match keys that equal the target. + t: k0_x0 + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 7) Match keys that start with the target. + t: k0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. + t: x0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. + t: '^.*0_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. + r: + - {k0_x0: A0} + - {k0_x0: A1} +''' + +RETURN = ''' + _value: + description: The list of dictionaries with selected keys. + type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def keep_keys(data, target=None, matching_parameter='equal'): + """keep specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, target, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is not None + + return [dict((k, v) for k, v in d.items() if keep_key(k)) for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'keep_keys': keep_keys, + } diff --git a/plugins/plugin_utils/keys_filter.py 
b/plugins/plugin_utils/keys_filter.py new file mode 100644 index 0000000000..37b7611c50 --- /dev/null +++ b/plugins/plugin_utils/keys_filter.py @@ -0,0 +1,108 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.six import string_types +from ansible.module_utils.common._collections_compat import Mapping, Sequence + + +def _keys_filter_params(data, target, matching_parameter): + """test parameters: + * data must be a list of dictionaries. All keys must be strings. + * target must be a non-empty sequence. + * matching_parameter is member of a list. + """ + + mp = matching_parameter + ml = ['equal', 'starts_with', 'ends_with', 'regex'] + + if not isinstance(data, Sequence): + msg = "First argument must be a list. %s is %s" + raise AnsibleFilterError(msg % (data, type(data))) + + for elem in data: + if not isinstance(elem, Mapping): + msg = "The data items must be dictionaries. %s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + + for elem in data: + if not all(isinstance(item, string_types) for item in elem.keys()): + msg = "Top level keys must be strings. keys: %s" + raise AnsibleFilterError(msg % elem.keys()) + + if not isinstance(target, Sequence): + msg = ("The target must be a string or a list. target is %s.") + raise AnsibleFilterError(msg % target) + + if len(target) == 0: + msg = ("The target can't be empty.") + raise AnsibleFilterError(msg) + + if mp not in ml: + msg = ("The matching_parameter must be one of %s. 
matching_parameter is %s") + raise AnsibleFilterError(msg % (ml, mp)) + + return + + +def _keys_filter_target_str(target, matching_parameter): + """test: + * If target is list all items are strings + * If matching_parameter=regex target is a string or list with single string + convert and return: + * tuple of unique target items, or + * tuple with single item, or + * compiled regex if matching_parameter=regex + """ + + if isinstance(target, list): + for elem in target: + if not isinstance(elem, string_types): + msg = "The target items must be strings. %s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + + if matching_parameter == 'regex': + if isinstance(target, string_types): + r = target + else: + if len(target) > 1: + msg = ("Single item is required in the target list if matching_parameter is regex.") + raise AnsibleFilterError(msg) + else: + r = target[0] + try: + tt = re.compile(r) + except re.error: + msg = ("The target must be a valid regex if matching_parameter is regex." + " target is %s") + raise AnsibleFilterError(msg % r) + elif isinstance(target, string_types): + tt = (target, ) + else: + tt = tuple(set(target)) + + return tt + + +def _keys_filter_target_dict(target, matching_parameter): + """test: + * target is a list of dictionaries + * ... + """ + + # TODO: Complete and use this in filter replace_keys + + if isinstance(target, list): + for elem in target: + if not isinstance(elem, Mapping): + msg = "The target items must be dictionary. 
%s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + + return diff --git a/tests/integration/targets/filter_keep_keys/aliases b/tests/integration/targets/filter_keep_keys/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml b/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml new file mode 100644 index 0000000000..94825c9d61 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Test keep keys equal (default) + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ list1 | community.general.keep_keys(target=tt) }}" + tt: [k0_x0, k1_x1] + tags: equal_default + +- name: Test keep keys regex string + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ list1 | community.general.keep_keys(target=tt, matching_parameter=mp) }}" + mp: regex + tt: '^.*[01]_x.*$' + tags: regex_string + +- name: Test keep keys 
targets1 + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ targets1 }}" + loop_control: + label: "{{ item.mp }}: {{ item.tt }}" + vars: + rr: "{{ list1 | community.general.keep_keys(target=item.tt, matching_parameter=item.mp) }}" + tags: targets1 + +- name: Test keep keys targets2 + ansible.builtin.assert: + that: + - (rr | difference(result2) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ targets2 }}" + loop_control: + label: "{{ item.mp }}: {{ item.tt }}" + vars: + rr: "{{ list2 | community.general.keep_keys(target=item.tt, matching_parameter=item.mp) }}" + tags: targets2 diff --git a/tests/integration/targets/filter_keep_keys/tasks/main.yml b/tests/integration/targets/filter_keep_keys/tasks/main.yml new file mode 100644 index 0000000000..23457d1e11 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test keep_keys + import_tasks: keep_keys.yml diff --git a/tests/integration/targets/filter_keep_keys/vars/main.yml b/tests/integration/targets/filter_keep_keys/vars/main.yml new file mode 100644 index 0000000000..b25325253d --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/vars/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +targets1: + - {mp: equal, tt: [k0_x0, k1_x1]} + - {mp: starts_with, tt: [k0, k1]} + - {mp: 
ends_with, tt: [x0, x1]} + - {mp: regex, tt: ['^.*[01]_x.*$']} + - {mp: regex, tt: '^.*[01]_x.*$'} + +list1: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + +result1: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + +targets2: + - {mp: equal, tt: k0_x0} + - {mp: starts_with, tt: k0} + - {mp: ends_with, tt: x0} + - {mp: regex, tt: '^.*0_x.*$'} + +list2: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + +result2: + - {k0_x0: A0} + - {k0_x0: A1} From 0129346eda4cfa964bb146c16b980df5c5c19059 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 4 Jun 2024 06:27:50 +0200 Subject: [PATCH 110/482] git_config: deprecate reading values (#8453) Deprecate reading values. --- .../8453-git_config-deprecate-read.yml | 3 ++ plugins/modules/git_config.py | 39 +++++++------------ 2 files changed, 17 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/8453-git_config-deprecate-read.yml diff --git a/changelogs/fragments/8453-git_config-deprecate-read.yml b/changelogs/fragments/8453-git_config-deprecate-read.yml new file mode 100644 index 0000000000..a291568fce --- /dev/null +++ b/changelogs/fragments/8453-git_config-deprecate-read.yml @@ -0,0 +1,3 @@ +deprecated_features: + - "git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453)." + - "git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453)." 
diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py index a8d2ebe979..2c16821e9e 100644 --- a/plugins/modules/git_config.py +++ b/plugins/modules/git_config.py @@ -18,7 +18,7 @@ author: - Matthew Gamble (@djmattyg007) - Marius Gedminas (@mgedmin) requirements: ['git'] -short_description: Read and write git configuration +short_description: Update git configuration description: - The M(community.general.git_config) module changes git configuration by invoking C(git config). This is needed if you do not want to use M(ansible.builtin.template) for the entire git @@ -36,6 +36,8 @@ options: list_all: description: - List all settings (optionally limited to a given O(scope)). + - This option is B(deprecated) and will be removed from community.general 11.0.0. + Please use M(community.general.git_config_info) instead. type: bool default: false name: @@ -74,6 +76,8 @@ options: description: - When specifying the name of a single setting, supply a value to set that setting to the given value. + - From community.general 11.0.0 on, O(value) will be required if O(state=present). + To read values, use the M(community.general.git_config_info) module instead. 
type: str add_mode: description: @@ -143,29 +147,6 @@ EXAMPLES = ''' repo: /etc scope: local value: 'root@{{ ansible_fqdn }}' - -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=true - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: true - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: true - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: true - repo: /path/to/repo.git ''' RETURN = ''' @@ -193,7 +174,7 @@ from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( - list_all=dict(required=False, type='bool', default=False), + list_all=dict(required=False, type='bool', default=False, removed_in_version='11.0.0', removed_from_collection='community.general'), name=dict(type='str'), repo=dict(type='path'), file=dict(type='path'), @@ -222,6 +203,14 @@ def main(): new_value = params['value'] or '' add_mode = params['add_mode'] + if not unset and not new_value and not params['list_all']: + module.deprecate( + 'If state=present, a value must be specified from community.general 11.0.0 on.' + ' To read a config value, use the community.general.git_config_info module instead.', + version='11.0.0', + collection_name='community.general', + ) + scope = determine_scope(params) cwd = determine_cwd(scope, params) From 2a3819a696d0ee6d8646a2ff5583c01d8fffd356 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 6 Jun 2024 07:35:54 +0200 Subject: [PATCH 111/482] git_config: fix state=absent if value is present (#8452) * Fix state=absent if value is present. * Update changelog fragment. 
--- .../fragments/8452-git_config-absent.yml | 2 ++ plugins/modules/git_config.py | 2 +- .../targets/git_config/tasks/unset_value.yml | 24 +++++++++++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8452-git_config-absent.yml diff --git a/changelogs/fragments/8452-git_config-absent.yml b/changelogs/fragments/8452-git_config-absent.yml new file mode 100644 index 0000000000..11e0767713 --- /dev/null +++ b/changelogs/fragments/8452-git_config-absent.yml @@ -0,0 +1,2 @@ +bugfixes: + - "git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452)." diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py index 2c16821e9e..95969c1b38 100644 --- a/plugins/modules/git_config.py +++ b/plugins/modules/git_config.py @@ -252,7 +252,7 @@ def main(): module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '') elif unset and not out: module.exit_json(changed=False, msg='no setting to unset') - elif new_value in old_values and (len(old_values) == 1 or add_mode == "add"): + elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: module.exit_json(changed=False, msg="") # Until this point, the git config was just read and in case no change is needed, the module has already exited. 
diff --git a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml index dfa535a2d3..5f8c52c96f 100644 --- a/tests/integration/targets/git_config/tasks/unset_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_value.yml @@ -18,6 +18,30 @@ scope: "{{ option_scope }}" register: get_result +- name: assert unset changed and deleted value + assert: + that: + - unset_result is changed + - unset_result.diff.before == option_value + "\n" + - unset_result.diff.after == "\n" + - get_result.config_value == '' + +- import_tasks: setup_value.yml + +- name: unsetting value with value specified + git_config: + name: "{{ option_name }}" + scope: "{{ option_scope }}" + value: "{{ option_value }}" + state: absent + register: unset_result + +- name: getting value + git_config: + name: "{{ option_name }}" + scope: "{{ option_scope }}" + register: get_result + - name: assert unset changed and deleted value assert: that: From a0ad2d58490a278566a69f4c4c8fd20a5cfa92c1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 6 Jun 2024 17:36:39 +1200 Subject: [PATCH 112/482] add docs for the vardict module utils (#8460) * add docs for the vardict module utils * fix var name * add entry to BOTMETA * rollback adjustment in deps guide * Update docs/docsite/rst/guide_vardict.rst Co-authored-by: Felix Fontein * adjustments * Update docs/docsite/rst/guide_vardict.rst Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + docs/docsite/extra-docs.yml | 1 + docs/docsite/rst/guide_vardict.rst | 176 +++++++++++++++++++++++++++++ 3 files changed, 179 insertions(+) create mode 100644 docs/docsite/rst/guide_vardict.rst diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ef1b879de3..4ffd0783b7 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1462,6 +1462,8 @@ files: maintainers: $team_scaleway 
docs/docsite/rst/guide_deps.rst: maintainers: russoz + docs/docsite/rst/guide_vardict.rst: + maintainers: russoz docs/docsite/rst/test_guide.rst: maintainers: felixfontein ######################### diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 6b1d53ccd5..3bed9e35fc 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -17,3 +17,4 @@ sections: - title: Developer Guides toctree: - guide_deps + - guide_vardict diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst new file mode 100644 index 0000000000..e870bf175c --- /dev/null +++ b/docs/docsite/rst/guide_vardict.rst @@ -0,0 +1,176 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_vardict: + +VarDict Guide +============= + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the +``VarDict`` class to help manage the module variables. That class is a container for module variables, +especially the ones for which the module must keep track of state changes, and the ones that should +be published as return values. + +Each variable has extra behaviors controlled by associated metadata, simplifying the generation of +output values from the module. + +Quickstart +"""""""""" + +The simplest way of using ``VarDict`` is: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.vardict import VarDict + +Then in ``main()``, or any other function called from there: + +.. code-block:: python + + vars = VarDict() + + # Next 3 statements are equivalent + vars.abc = 123 + vars["abc"] = 123 + vars.set("abc", 123) + + vars.xyz = "bananas" + vars.ghi = False + +And by the time the module is about to exit: + +.. 
code-block:: python + + results = vars.output() + module.exit_json(**results) + +That makes the return value of the module: + +.. code-block:: javascript + + { + "abc": 123, + "xyz": "bananas", + "ghi": false + } + +Metadata +"""""""" + +The metadata values associated with each variable are: + +- ``output: bool`` - marks the variable for module output as a module return value. +- ``fact: bool`` - marks the variable for module output as an Ansible fact. +- ``verbosity: int`` - sets the minimum level of verbosity for which the variable will be included in the output. +- ``change: bool`` - controls the detection of changes in the variable value. +- ``initial_value: any`` - when using ``change`` and need to forcefully set an intial value to the variable. +- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``. + +See the sections below for more details on how to use the metadata. + + +Using VarDict +^^^^^^^^^^^^^ + +Basic Usage +""""""""""" + +As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object, +and also as an object attribute, such as ``vars.abc``. The form using the ``set()`` +method is special in the sense that you can use it to set metadata values: + +.. code-block:: python + + vars.set("abc", 123, output=False) + vars.set("abc", 123, output=True, change=True) + +Another way to set metadata after the variables have been created is: + +.. code-block:: python + + vars.set_meta("abc", output=False) + vars.set_meta("abc", output=True, change=True, diff=True) + +You can use either operator and attribute forms to access the value of the variable. Other ways to +access its value and its metadata are: + +.. code-block:: python + + print("abc value = {0}".format(vars.var("abc")["value"])) # get the value + print("abc output? 
{0}".format(vars.get_meta("abc")["output"])) # get the metadata like this + +The names of methods, such as ``set``, ``get_meta``, ``output`` amongst others, are reserved and +cannot be used as variable names. If you try to use a reserved name a ``ValueError`` exception +is raised with the message "Name is reserved". + +Generating output +""""""""""""""""" + +By default, every variable create will be enable for output with minimum verbosity set to zero, in +other words, they will always be in the output by default. + +You can control that when creating the variable for the first time or later in the code: + +.. code-block:: python + + vars.set("internal", x + 4, output=False) + vars.set_meta("internal", output=False) + +You can also set the verbosity of some variable, like: + +.. code-block:: python + + vars.set("abc", x + 4) + vars.set("debug_x", x, verbosity=3) + + results = vars.output(module._verbosity) + module.exit_json(**results) + +If the module was invoked with verbosity lower than 3, then the output will only contain +the variable ``abc``. If running at higher verbosity, as in ``ansible-playbook -vvv``, +then the output will also contain ``debug_x``. + +Generating facts is very similar to regular output, but variables are not marked as facts by default. + +.. code-block:: python + + vars.set("modulefact", x + 4, fact=True) + vars.set("debugfact", x, fact=True, verbosity=3) + + results = vars.output(module._verbosity) + results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)} + module.exit_json(**results) + +Handling change +""""""""""""""" + +You can use ``VarDict`` to determine whether variables have had their values changed. + +.. code-block:: python + + vars.set("abc", 42, change=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + module.exit_json(**results) + +If tracking changes in variables, you may want to present the difference between the initial and the final +values of it. 
For that, you want to use: + +.. code-block:: python + + vars.set("abc", 42, change=True, diff=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + results["diff"] = vars.diff() + module.exit_json(**results) + +.. versionadded:: 6.1.0 From 1c4ab7fafc032700fc72032e97666aaa0ab6ba7f Mon Sep 17 00:00:00 2001 From: Daniel Date: Thu, 6 Jun 2024 07:37:08 +0200 Subject: [PATCH 113/482] Add support for SSHFP records to ipa_dnsrecord module (#8404) * Add support for SSHFP records to ipa_dnsrecord module * Create 8404-ipa_dnsrecord_sshfp.yml * Apply suggestions from code review Co-authored-by: Felix Fontein * Fix a typo in the example for ipa_dnsrecord with type SSHFP * Update plugins/modules/ipa_dnsrecord.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8404-ipa_dnsrecord_sshfp.yml | 2 ++ plugins/modules/ipa_dnsrecord.py | 27 ++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml diff --git a/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml b/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml new file mode 100644 index 0000000000..e989f5dbb1 --- /dev/null +++ b/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml @@ -0,0 +1,2 @@ +minor_changes: + - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404). diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index cb4ce03ddd..59475a55be 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -35,13 +35,14 @@ options: record_type: description: - The type of DNS record name. - - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV' and 'MX' are supported. + - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX' and 'SSHFP' are supported. 
- "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - "'SRV' and 'MX' are added in version 2.8." - "'NS' are added in comunity.general 8.2.0." + - "'SSHFP' are added in community.general 9.1.0." required: false default: 'A' - choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT'] + choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP'] type: str record_value: description: @@ -57,6 +58,7 @@ options: - In the case of 'TXT' record type, this will be a text. - In the case of 'SRV' record type, this will be a service record. - In the case of 'MX' record type, this will be a mail exchanger record. + - In the case of 'SSHFP' record type, this will be an SSH fingerprint record. type: str record_values: description: @@ -71,6 +73,7 @@ options: - In the case of 'TXT' record type, this will be a text. - In the case of 'SRV' record type, this will be a service record. - In the case of 'MX' record type, this will be a mail exchanger record. + - In the case of 'SSHFP' record type, this will be an SSH fingerprint record. type: list elements: str record_ttl: @@ -175,6 +178,20 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: ChangeMe! + +- name: Retrieve the current sshfp fingerprints + ansible.builtin.command: ssh-keyscan -D localhost + register: ssh_hostkeys + +- name: Update the SSHFP records in DNS + community.general.ipa_dnsrecord: + name: "{{ inventory_hostname}}" + zone_name: example.com + record_type: 'SSHFP' + record_values: "{{ ssh_hostkeys.stdout.split('\n') | map('split', 'SSHFP ') | map('last') | list }}" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: ChangeMe! 
''' RETURN = r''' @@ -228,6 +245,8 @@ class DNSRecordIPAClient(IPAClient): item.update(srvrecord=value) elif details['record_type'] == 'MX': item.update(mxrecord=value) + elif details['record_type'] == 'SSHFP': + item.update(sshfprecord=value) self._post_json(method='dnsrecord_add', name=zone_name, item=item) @@ -266,6 +285,8 @@ def get_dnsrecord_dict(details=None): module_dnsrecord.update(srvrecord=details['record_values']) elif details['record_type'] == 'MX' and details['record_values']: module_dnsrecord.update(mxrecord=details['record_values']) + elif details['record_type'] == 'SSHFP' and details['record_values']: + module_dnsrecord.update(sshfprecord=details['record_values']) if details.get('record_ttl'): module_dnsrecord.update(dnsttl=details['record_ttl']) @@ -328,7 +349,7 @@ def ensure(module, client): def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX'] + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX', 'SSHFP'] argument_spec = ipa_argument_spec() argument_spec.update( zone_name=dict(type='str', required=True), From 06f13e79b1bfac4cb33cce300856618582f238ef Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Thu, 6 Jun 2024 23:34:31 +0200 Subject: [PATCH 114/482] Feature filter remove_keys (#8443) * Add filter remove_keys. * Add filter remove_keys integration test, fragment, and maintainer. * Update with plugins/plugin_utils/keys_filter.py * Update according PR #8456 * Update maintainers. * Fix typo in return doc. * Remove local keys_filter.py. Then rebase. 
* Add local keys_filter.py * Update plugins/filter/remove_keys.py Co-authored-by: Felix Fontein * Update plugins/filter/remove_keys.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/filter/remove_keys.py | 138 ++++++++++++++++++ .../targets/filter_remove_keys/aliases | 5 + .../targets/filter_remove_keys/tasks/main.yml | 7 + .../filter_remove_keys/tasks/remove_keys.yml | 79 ++++++++++ .../targets/filter_remove_keys/vars/main.yml | 33 +++++ 6 files changed, 264 insertions(+) create mode 100644 plugins/filter/remove_keys.py create mode 100644 tests/integration/targets/filter_remove_keys/aliases create mode 100644 tests/integration/targets/filter_remove_keys/tasks/main.yml create mode 100644 tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml create mode 100644 tests/integration/targets/filter_remove_keys/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 4ffd0783b7..ade18c0e33 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -172,6 +172,8 @@ files: $filters/lists_union.yml: maintainers: cfiehe $filters/random_mac.py: {} + $filters/remove_keys.py: + maintainers: vbotka $filters/time.py: maintainers: resmo $filters/to_days.yml: diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py new file mode 100644 index 0000000000..335f82d31f --- /dev/null +++ b/plugins/filter/remove_keys.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: remove_keys + short_description: Remove specific keys from dictionaries in a list + version_added: "9.1.0" + author: + - Vladimir Botka (@vbotka) + - Felix Fontein 
(@felixfontein) + description: This filter removes only specified keys from a provided list of dictionaries. + options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to remove, or a list of keys or keys patterns to remove. + - If O(matching_parameter=regex) there must be exactly one pattern provided. + type: raw + required: true + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target) items. + starts_with: Matches keys that start with one of the O(target) items. + ends_with: Matches keys that end with one of the O(target) items. + regex: + - Matches keys that match the regular expresion provided in O(target). + - In this case, O(target) must be a regex string or a list with single regex string. +''' + +EXAMPLES = ''' + l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. + t: [k0_x0, k1_x1] + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. + t: [k0, k1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. + t: [x0, x1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. + t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. + t: '^.*[01]_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. 
+ r: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + + # 6) By default match keys that equal the target. + t: k0_x0 + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 7) Match keys that start with the target. + t: k0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. + t: x0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. + t: '^.*0_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. + r: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} +''' + +RETURN = ''' + _value: + description: The list of dictionaries with selected keys removed. + type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def remove_keys(data, target=None, matching_parameter='equal'): + """remove specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, target, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key not in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return not key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return not key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is None + + return [dict((k, v) for k, v in d.items() if keep_key(k)) for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'remove_keys': remove_keys, + } diff --git a/tests/integration/targets/filter_remove_keys/aliases b/tests/integration/targets/filter_remove_keys/aliases new file 
mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_remove_keys/tasks/main.yml b/tests/integration/targets/filter_remove_keys/tasks/main.yml new file mode 100644 index 0000000000..d4215d8c59 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test remove_keys + import_tasks: remove_keys.yml diff --git a/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml b/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml new file mode 100644 index 0000000000..121cd88cfd --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Test remove keys equal (default) + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ list1 | community.general.remove_keys(target=tt) }}" + tt: [k0_x0, k1_x1] + tags: equal_default + +- name: Test remove keys regex string + ansible.builtin.assert: + that: + - (rr | 
difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ list1 | community.general.remove_keys(target=tt, matching_parameter=mp) }}" + mp: regex + tt: '^.*[01]_x.*$' + tags: regex_string + +- name: Test remove keys targets1 + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ targets1 }}" + loop_control: + label: "{{ item.mp }}: {{ item.tt }}" + vars: + rr: "{{ list1 | community.general.remove_keys(target=item.tt, matching_parameter=item.mp) }}" + tags: targets1 + +- name: Test remove keys targets2 + ansible.builtin.assert: + that: + - (rr | difference(result2) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ targets2 }}" + loop_control: + label: "{{ item.mp }}: {{ item.tt }}" + vars: + rr: "{{ list2 | community.general.remove_keys(target=item.tt, matching_parameter=item.mp) }}" + tags: targets1 diff --git a/tests/integration/targets/filter_remove_keys/vars/main.yml b/tests/integration/targets/filter_remove_keys/vars/main.yml new file mode 100644 index 0000000000..a52d09a34a --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/vars/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +targets1: + - {mp: equal, tt: [k0_x0, k1_x1]} + - {mp: starts_with, tt: [k0, k1]} + - {mp: ends_with, tt: [x0, x1]} + - {mp: regex, tt: ['^.*[01]_x.*$']} + - {mp: regex, tt: '^.*[01]_x.*$'} + +list1: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} 
+ - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + +result1: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + +targets2: + - {mp: equal, tt: k0_x0} + - {mp: starts_with, tt: k0} + - {mp: ends_with, tt: x0} + - {mp: regex, tt: '^.*0_x.*$'} + +list2: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + +result2: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} From d2d7deb4ecb978dd21a68b4ebd372da891ee3029 Mon Sep 17 00:00:00 2001 From: Thomas Gouverneur Date: Sat, 8 Jun 2024 14:04:59 +0200 Subject: [PATCH 115/482] #8440 Allow for API Port to be specified when using proxmox_kvm (#8441) * added api_port * added changelog fragments for #8440 * api_port minor changes - Added documentation on api_port - Fixed multiple spaces after operator - Switched from str to int * Update changelogs/fragments/8440-allow-api-port-specification.yaml Co-authored-by: Felix Fontein * Update changelogs/fragments/8440-allow-api-port-specification.yaml Co-authored-by: Felix Fontein * Update plugins/doc_fragments/proxmox.py Co-authored-by: Felix Fontein * Update plugins/doc_fragments/proxmox.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8440-allow-api-port-specification.yaml | 2 ++ plugins/doc_fragments/proxmox.py | 7 +++++++ plugins/module_utils/proxmox.py | 8 ++++++++ 3 files changed, 17 insertions(+) create mode 100644 changelogs/fragments/8440-allow-api-port-specification.yaml diff --git a/changelogs/fragments/8440-allow-api-port-specification.yaml b/changelogs/fragments/8440-allow-api-port-specification.yaml new file mode 100644 index 0000000000..646ee1ab60 --- /dev/null +++ b/changelogs/fragments/8440-allow-api-port-specification.yaml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, 
https://github.com/ansible-collections/community.general/pull/8441). diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py index cb533fefa6..239dba06da 100644 --- a/plugins/doc_fragments/proxmox.py +++ b/plugins/doc_fragments/proxmox.py @@ -16,6 +16,13 @@ options: - Specify the target host of the Proxmox VE cluster. type: str required: true + api_port: + description: + - Specify the target port of the Proxmox VE cluster. + - Uses the E(PROXMOX_PORT) environment variable if not specified. + type: int + required: false + version_added: 9.1.0 api_user: description: - Specify the user to authenticate with. diff --git a/plugins/module_utils/proxmox.py b/plugins/module_utils/proxmox.py index 5fd783d654..05bf1874b3 100644 --- a/plugins/module_utils/proxmox.py +++ b/plugins/module_utils/proxmox.py @@ -29,6 +29,9 @@ def proxmox_auth_argument_spec(): required=True, fallback=(env_fallback, ['PROXMOX_HOST']) ), + api_port=dict(type='int', + fallback=(env_fallback, ['PROXMOX_PORT']) + ), api_user=dict(type='str', required=True, fallback=(env_fallback, ['PROXMOX_USER']) @@ -82,6 +85,7 @@ class ProxmoxAnsible(object): def _connect(self): api_host = self.module.params['api_host'] + api_port = self.module.params['api_port'] api_user = self.module.params['api_user'] api_password = self.module.params['api_password'] api_token_id = self.module.params['api_token_id'] @@ -89,6 +93,10 @@ class ProxmoxAnsible(object): validate_certs = self.module.params['validate_certs'] auth_args = {'user': api_user} + + if api_port: + auth_args['port'] = api_port + if api_password: auth_args['password'] = api_password else: From 1ae6c825583771f47592e4309adf465822e7d8d8 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 10 Jun 2024 20:26:38 +0200 Subject: [PATCH 116/482] CI: Bump Azure test container to 6.0.0 (#8483) Bump Azure test container to 6.0.0. 
--- .azure-pipelines/azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 7dc438ad3a..754dfd0437 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -53,7 +53,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:4.0.1 + image: quay.io/ansible/azure-pipelines-test-container:6.0.0 pool: Standard From c31499a4110c3e8c6fe9fe0055a689ab95de672a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 13 Jun 2024 05:46:54 +1200 Subject: [PATCH 117/482] django_check: new module (#8454) * django_check: new module * sanity fix * working version * remove unused import * add note about the module output * add note on module failing when rc!=0 --- .github/BOTMETA.yml | 10 +- plugins/modules/django_check.py | 113 ++++++++++++++++++ .../unit/plugins/modules/test_django_check.py | 13 ++ .../plugins/modules/test_django_check.yaml | 27 +++++ 4 files changed, 159 insertions(+), 4 deletions(-) create mode 100644 plugins/modules/django_check.py create mode 100644 tests/unit/plugins/modules/test_django_check.py create mode 100644 tests/unit/plugins/modules/test_django_check.yaml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ade18c0e33..4c6a98eaef 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -506,13 +506,15 @@ files: maintainers: tintoy $modules/discord.py: maintainers: cwollinger - $modules/django_manage.py: - ignore: scottanderson42 tastychutney - labels: django_manage + $modules/django_check.py: + maintainers: russoz + $modules/django_command.py: maintainers: russoz $modules/django_createcachetable.py: maintainers: russoz - $modules/django_command.py: + $modules/django_manage.py: + ignore: scottanderson42 tastychutney + labels: django_manage maintainers: russoz $modules/dnf_versionlock.py: 
maintainers: moreda diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py new file mode 100644 index 0000000000..1553da7a30 --- /dev/null +++ b/plugins/modules/django_check.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: django_check +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin check) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin check). +extends_documentation_fragment: + - community.general.attributes + - community.general.django +options: + database: + description: + - Specify databases to run checks against. + - If not specified, Django will not run database tests. + type: list + elements: str + deploy: + description: + - Include additional checks relevant in a deployment setting. + type: bool + default: false + fail_level: + description: + - Message level that will trigger failure. + - Default is the Django default value. Check the documentation for the version being used. + type: str + choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG] + tags: + description: + - Restrict checks to specific tags. + type: list + elements: str + apps: + description: + - Restrict checks to specific applications. + - Default is to check all applications. + type: list + elements: str +notes: + - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc). + - The module will fail if RV(ignore:rc) is not zero. 
+attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = """ +- name: Check the entire project + community.general.django_check: + settings: myproject.settings + +- name: Create the project using specific databases + community.general.django_check: + database: + - somedb + - myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = """ +run_info: + description: Command-line execution information. + type: dict + returned: success and C(verbosity) >= 3 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCheck(DjangoModuleHelper): + module = dict( + argument_spec=dict( + database=dict(type="list", elements="str"), + deploy=dict(type="bool", default=False), + fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]), + tags=dict(type="list", elements="str"), + apps=dict(type="list", elements="str"), + ), + supports_check_mode=True, + ) + arg_formats = dict( + database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + deploy=cmd_runner_fmt.as_bool("--deploy"), + fail_level=cmd_runner_fmt.as_opt_val("--fail-level"), + tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"), + apps=cmd_runner_fmt.as_list(), + ) + django_admin_cmd = "check" + django_admin_arg_order = "database deploy fail_level tags apps" + + +def main(): + DjangoCheck.execute() + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_django_check.py b/tests/unit/plugins/modules/test_django_check.py new file mode 100644 index 0000000000..8aec71900b --- /dev/null +++ b/tests/unit/plugins/modules/test_django_check.py @@ -0,0 +1,13 @@ +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import django_check +from .helper import Helper + + +Helper.from_module(django_check, __name__) diff --git a/tests/unit/plugins/modules/test_django_check.yaml b/tests/unit/plugins/modules/test_django_check.yaml new file mode 100644 index 0000000000..6156aaa2c2 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_check.yaml @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: success + input: + settings: whatever.settings + run_command_calls: + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "whatever\n" + err: "" +- id: multiple_databases + input: + settings: whatever.settings + database: + - abc + - def + run_command_calls: + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] + environ: *env-def + rc: 0 + out: "whatever\n" + err: "" From ac3c04357cb24c07c1038702df67de1cb21d710b Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Wed, 12 Jun 2024 19:47:18 +0200 Subject: [PATCH 118/482] Update docsite chapter "Merging lists of dictionaries" (#8477) * Update docs 'Merging lists of dictionaries' * Adding links to module and plugin options in docs/docsite/helper/lists_mergeby * Add subsections and improve formatting. * Add example-009 'Merge single list' * Fix licenses. * Fix variables. 
* Update docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 Co-authored-by: Felix Fontein * Update docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../helper/lists_mergeby/default-common.yml | 16 +- .../lists_mergeby/default-recursive-true.yml | 9 +- .../helper/lists_mergeby/example-001.yml | 2 +- .../lists_mergeby/example-001_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-002.yml | 2 +- .../lists_mergeby/example-002_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-003.yml | 2 +- .../lists_mergeby/example-003_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-004.yml | 2 +- .../lists_mergeby/example-004_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-005.yml | 2 +- .../lists_mergeby/example-005_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-006.yml | 2 +- .../lists_mergeby/example-006_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-007.yml | 2 +- .../lists_mergeby/example-007_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-008.yml | 2 +- .../lists_mergeby/example-008_vars/list3.yml | 3 +- .../helper/lists_mergeby/example-009.yml | 14 + .../example-009_vars/default-common.yml | 1 + .../lists_mergeby/example-009_vars/list3.yml | 6 + .../docsite/helper/lists_mergeby/examples.yml | 56 ++-- .../helper/lists_mergeby/examples_all.rst.j2 | 4 +- .../helper/lists_mergeby/extra-vars.yml | 7 + ...tions_merging_lists_of_dictionaries.rst.j2 | 44 ++-- .../docsite/helper/lists_mergeby/list3.out.j2 | 2 +- .../docsite/helper/lists_mergeby/playbook.yml | 10 +- ...rmations_merging_lists_of_dictionaries.rst | 248 ++++++++---------- 28 files changed, 238 insertions(+), 219 deletions(-) create mode 100644 docs/docsite/helper/lists_mergeby/example-009.yml create mode 120000 docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml create mode 100644 
docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml create mode 100644 docs/docsite/helper/lists_mergeby/extra-vars.yml diff --git a/docs/docsite/helper/lists_mergeby/default-common.yml b/docs/docsite/helper/lists_mergeby/default-common.yml index fd874e5c91..4431fe27dc 100644 --- a/docs/docsite/helper/lists_mergeby/default-common.yml +++ b/docs/docsite/helper/lists_mergeby/default-common.yml @@ -2,17 +2,11 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} diff --git a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml index 133c8f2aec..eb83ea82e1 100644 --- a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml +++ b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml @@ -2,14 +2,12 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - list1: - name: myname01 param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -18,7 +16,6 @@ list2: param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] diff --git a/docs/docsite/helper/lists_mergeby/example-001.yml b/docs/docsite/helper/lists_mergeby/example-001.yml index 0cf6a9b8a7..c27b019e52 100644 --- a/docs/docsite/helper/lists_mergeby/example-001.yml +++ 
b/docs/docsite/helper/lists_mergeby/example-001.yml @@ -8,7 +8,7 @@ dir: example-001_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-001.out diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml index 0604feccbd..8bd8bc8f24 100644 --- a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml @@ -2,6 +2,5 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ list1| +list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-002.yml b/docs/docsite/helper/lists_mergeby/example-002.yml index 5e6e0315df..e164db1251 100644 --- a/docs/docsite/helper/lists_mergeby/example-002.yml +++ b/docs/docsite/helper/lists_mergeby/example-002.yml @@ -8,7 +8,7 @@ dir: example-002_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-002.out diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml index 8ad7524072..be6cfcbf31 100644 --- a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml @@ -2,6 +2,5 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-003.yml b/docs/docsite/helper/lists_mergeby/example-003.yml index 
2f93ab8a27..cbc5e43a50 100644 --- a/docs/docsite/helper/lists_mergeby/example-003.yml +++ b/docs/docsite/helper/lists_mergeby/example-003.yml @@ -8,7 +8,7 @@ dir: example-003_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-003.out diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml index d5374eece5..2eff5df41a 100644 --- a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml @@ -2,7 +2,6 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" diff --git a/docs/docsite/helper/lists_mergeby/example-004.yml b/docs/docsite/helper/lists_mergeby/example-004.yml index 3ef067faf3..68e77dea81 100644 --- a/docs/docsite/helper/lists_mergeby/example-004.yml +++ b/docs/docsite/helper/lists_mergeby/example-004.yml @@ -8,7 +8,7 @@ dir: example-004_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-004.out diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml index a054ea1e73..94c8ceed38 100644 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', 
recursive=true, list_merge='keep') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-005.yml b/docs/docsite/helper/lists_mergeby/example-005.yml index 57e7a779d9..b7b81de294 100644 --- a/docs/docsite/helper/lists_mergeby/example-005.yml +++ b/docs/docsite/helper/lists_mergeby/example-005.yml @@ -8,7 +8,7 @@ dir: example-005_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-005.out diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml index 3480bf6581..f0d7751f22 100644 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-006.yml b/docs/docsite/helper/lists_mergeby/example-006.yml index 41fc88e496..1be3becbc0 100644 --- a/docs/docsite/helper/lists_mergeby/example-006.yml +++ b/docs/docsite/helper/lists_mergeby/example-006.yml @@ -8,7 +8,7 @@ dir: example-006_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-006.out diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml index 97513b5593..f555c8dcb2 100644 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-007.yml b/docs/docsite/helper/lists_mergeby/example-007.yml index 3de7158447..8a596ea68e 100644 --- a/docs/docsite/helper/lists_mergeby/example-007.yml +++ b/docs/docsite/helper/lists_mergeby/example-007.yml @@ -8,7 +8,7 @@ dir: example-007_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug|d(false) | bool - template: src: list3.out.j2 dest: example-007.out diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml index cb51653b49..d8ad16cf4d 100644 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-008.yml b/docs/docsite/helper/lists_mergeby/example-008.yml index e33828bf9a..6d5c03bc6d 100644 --- a/docs/docsite/helper/lists_mergeby/example-008.yml +++ b/docs/docsite/helper/lists_mergeby/example-008.yml @@ -8,7 +8,7 @@ dir: example-008_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-008.out diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml index af7001fc4a..b2051376ea 100644 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml +++ 
b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-009.yml b/docs/docsite/helper/lists_mergeby/example-009.yml new file mode 100644 index 0000000000..beef5d356c --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009.yml @@ -0,0 +1,14 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: 9. Merge single list by common attribute 'name' + include_vars: + dir: example-009_vars +- debug: + var: list3 + when: debug | d(false) | bool +- template: + src: list3.out.j2 + dest: example-009.out diff --git a/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml new file mode 120000 index 0000000000..7ea8984a8d --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml @@ -0,0 +1 @@ +../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml new file mode 100644 index 0000000000..1708e3bafa --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" 
diff --git a/docs/docsite/helper/lists_mergeby/examples.yml b/docs/docsite/helper/lists_mergeby/examples.yml index 83b985084e..34ad2d1558 100644 --- a/docs/docsite/helper/lists_mergeby/examples.yml +++ b/docs/docsite/helper/lists_mergeby/examples.yml @@ -4,51 +4,75 @@ # SPDX-License-Identifier: GPL-3.0-or-later examples: - - label: 'In the example below the lists are merged by the attribute ``name``:' + - title: Two lists + description: 'In the example below the lists are merged by the attribute ``name``:' file: example-001_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-001.out lang: 'yaml' - - label: 'It is possible to use a list of lists as an input of the filter:' + - title: List of two lists + description: 'It is possible to use a list of lists as an input of the filter:' file: example-002_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces the same result as in the previous example:' + - title: + description: 'This produces the same result as in the previous example:' file: example-002.out lang: 'yaml' - - label: 'Example ``list_merge=replace`` (default):' + - title: Single list + description: 'It is possible to merge single list:' + file: example-009_vars/list3.yml + lang: 'yaml+jinja' + - title: + description: 'This produces the same result as in the previous example:' + file: example-009.out + lang: 'yaml' + - title: list_merge=replace (default) + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default):' file: example-003_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-003.out lang: 'yaml' - - label: 'Example ``list_merge=keep``:' + - title: list_merge=keep + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`:' file: example-004_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This 
produces:' file: example-004.out lang: 'yaml' - - label: 'Example ``list_merge=append``:' + - title: list_merge=append + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`:' file: example-005_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-005.out lang: 'yaml' - - label: 'Example ``list_merge=prepend``:' + - title: list_merge=prepend + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`:' file: example-006_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-006.out lang: 'yaml' - - label: 'Example ``list_merge=append_rp``:' + - title: list_merge=append_rp + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`:' file: example-007_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-007.out lang: 'yaml' - - label: 'Example ``list_merge=prepend_rp``:' + - title: list_merge=prepend_rp + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`:' file: example-008_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-008.out lang: 'yaml' diff --git a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 index 95a0fafddc..88098683b9 100644 --- a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 +++ b/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 @@ -4,10 +4,10 @@ SPDX-License-Identifier: GPL-3.0-or-later {% for i in examples %} -{{ i.label }} +{{ i.description }} .. 
code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/extra-vars.yml b/docs/docsite/helper/lists_mergeby/extra-vars.yml new file mode 100644 index 0000000000..0482c7ff29 --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/extra-vars.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +examples_one: true +examples_all: true +merging_lists_of_dictionaries: true diff --git a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 index 71d0d5da6c..ad74161dcd 100644 --- a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 +++ b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 @@ -6,57 +6,69 @@ Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. 
note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. Let us use the lists below in the following examples: .. code-block:: yaml - {{ lookup('file', 'default-common.yml')|indent(2) }} + {{ lookup('file', 'default-common.yml') | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% for i in examples[0:2] %} -{{ i.label }} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} .. versionadded:: 2.0.0 -{% for i in examples[2:4] %} -{{ i.label }} +{% for i in examples[2:6] %} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. +The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. **recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. 
+ Is a boolean, default to ``false``. Should the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. **list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. + Is a string, its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists. -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. +The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. Functionality of the parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options. Let us use the lists below in the following examples .. code-block:: yaml - {{ lookup('file', 'default-recursive-true.yml')|indent(2) }} + {{ lookup('file', 'default-recursive-true.yml') | split('\n') | reject('match', '^(#|---)') | join ('\n') |indent(2) }} -{% for i in examples[4:16] %} -{{ i.label }} +{% for i in examples[6:] %} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. 
code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') |indent(2) }} {% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/list3.out.j2 b/docs/docsite/helper/lists_mergeby/list3.out.j2 index b51f6b8681..a30a5c4ab0 100644 --- a/docs/docsite/helper/lists_mergeby/list3.out.j2 +++ b/docs/docsite/helper/lists_mergeby/list3.out.j2 @@ -4,4 +4,4 @@ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://w SPDX-License-Identifier: GPL-3.0-or-later #} list3: -{{ list3|to_nice_yaml(indent=0) }} + {{ list3 | to_yaml(indent=2, sort_keys=false) | indent(2) }} diff --git a/docs/docsite/helper/lists_mergeby/playbook.yml b/docs/docsite/helper/lists_mergeby/playbook.yml index 793d233485..ab389fa129 100644 --- a/docs/docsite/helper/lists_mergeby/playbook.yml +++ b/docs/docsite/helper/lists_mergeby/playbook.yml @@ -5,7 +5,7 @@ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # 1) Run all examples and create example-XXX.out -# shell> ansible-playbook playbook.yml -e examples=true +# shell> ansible-playbook playbook.yml -e examples_one=true # # 2) Optionally, for testing, create examples_all.rst # shell> ansible-playbook playbook.yml -e examples_all=true @@ -45,18 +45,20 @@ tags: t007 - import_tasks: example-008.yml tags: t008 - when: examples|d(false)|bool + - import_tasks: example-009.yml + tags: t009 + when: examples_one | d(false) | bool - block: - include_vars: examples.yml - template: src: examples_all.rst.j2 dest: examples_all.rst - when: examples_all|d(false)|bool + when: examples_all | d(false) | bool - block: - include_vars: examples.yml - template: src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst - when: merging_lists_of_dictionaries|d(false)|bool + when: merging_lists_of_dictionaries | d(false) | bool diff --git 
a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst index 06fa79d16a..cafe04e5c4 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst @@ -6,33 +6,30 @@ Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby filter `. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. Let us use the lists below in the following examples: .. code-block:: yaml list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} +Two lists +""""""""" In the example below the lists are merged by the attribute ``name``: .. 
code-block:: yaml+jinja - list3: "{{ list1| + list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" This produces: @@ -40,24 +37,21 @@ This produces: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} .. versionadded:: 2.0.0 +List of two lists +""""""""""""""""" It is possible to use a list of lists as an input of the filter: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" This produces the same result as in the previous example: @@ -65,15 +59,29 @@ This produces the same result as in the previous example: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} + +Single list +""""""""""" +It is possible to merge single list: + +.. code-block:: yaml+jinja + + list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" + +This produces the same result as in the previous example: + +.. code-block:: yaml + + list3: + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. 
@@ -95,8 +103,7 @@ Let us use the lists below in the following examples param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -105,16 +112,17 @@ Let us use the lists below in the following examples param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] +list_merge=replace (default) +"""""""""""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default): .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" @@ -123,25 +131,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4] +list_merge=keep +""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='keep') }}" @@ -151,25 +156,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3] +list_merge=append +""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`: .. 
code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" @@ -179,30 +181,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 3, 4, 4] +list_merge=prepend +"""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" @@ -212,30 +206,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2, 3] +list_merge=append_rp +"""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" @@ -245,29 +231,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 4, 4] +list_merge=prepend_rp +""""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" @@ -277,21 +256,12 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2] From 800bc0111277770bff3b26ff527431d57dadb8ff Mon Sep 17 00:00:00 2001 From: joris <5111464+tyxieblub@users.noreply.github.com> Date: Wed, 12 Jun 2024 19:47:50 +0200 Subject: [PATCH 119/482] feat(redis_info): add option to fetch cluster info (#8464) * feat(redis_info): add option to fetch cluster info * add changelog fragment * update description Co-authored-by: Felix Fontein * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8464-redis-add-cluster-info.yml | 2 + plugins/modules/redis_info.py | 48 ++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8464-redis-add-cluster-info.yml diff --git a/changelogs/fragments/8464-redis-add-cluster-info.yml b/changelogs/fragments/8464-redis-add-cluster-info.yml new file mode 100644 
index 0000000000..921307d716 --- /dev/null +++ b/changelogs/fragments/8464-redis-add-cluster-info.yml @@ -0,0 +1,2 @@ +minor_changes: + - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py index f352d53d79..c75abcf212 100644 --- a/plugins/modules/redis_info.py +++ b/plugins/modules/redis_info.py @@ -30,6 +30,11 @@ options: version_added: 7.5.0 ca_certs: version_added: 7.5.0 + cluster: + default: false + description: Get informations about cluster status as RV(cluster). + type: bool + version_added: 9.1.0 seealso: - module: community.general.redis author: "Pavlo Bashynskyi (@levonet)" @@ -43,6 +48,15 @@ EXAMPLES = r''' - name: Print server information ansible.builtin.debug: var: result.info + +- name: Get server cluster information + community.general.redis_info: + cluster: true + register: result + +- name: Print server cluster information + ansible.builtin.debug: + var: result.cluster_info ''' RETURN = r''' @@ -178,6 +192,25 @@ info: "used_memory_scripts_human": "0B", "used_memory_startup": 791264 } +cluster: + description: The default set of cluster information sections U(https://redis.io/commands/cluster-info). + returned: success if O(cluster=true) + version_added: 9.1.0 + type: dict + sample: { + "cluster_state": ok, + "cluster_slots_assigned": 16384, + "cluster_slots_ok": 16384, + "cluster_slots_pfail": 0, + "cluster_slots_fail": 0, + "cluster_known_nodes": 6, + "cluster_size": 3, + "cluster_current_epoch": 6, + "cluster_my_epoch": 2, + "cluster_stats_messages_sent": 1483972, + "cluster_stats_messages_received": 1483968, + "total_cluster_links_buffer_limit_exceeded": 0 + } ''' import traceback @@ -202,14 +235,19 @@ def redis_client(**client_params): # Module execution. 
def main(): + module_args = dict( + cluster=dict(type='bool', default=False), + ) + module_args.update(redis_auth_argument_spec(tls_default=False)) module = AnsibleModule( - argument_spec=redis_auth_argument_spec(tls_default=False), + argument_spec=module_args, supports_check_mode=True, ) fail_imports(module, module.params['tls']) redis_params = redis_auth_params(module) + cluster = module.params['cluster'] # Connect and check client = redis_client(**redis_params) @@ -219,7 +257,13 @@ def main(): module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) info = client.info() - module.exit_json(changed=False, info=info) + + result = dict(changed=False, info=info) + + if cluster: + result['cluster_info'] = client.execute_command('CLUSTER INFO') + + module.exit_json(**result) if __name__ == '__main__': From 8f60f3aef925c327dc00fdde4f1d894461aade9d Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Thu, 13 Jun 2024 07:35:32 +0200 Subject: [PATCH 120/482] Update docs lists_mergeby (#8475) * Fix #8474. Complete examples and documentation of lists_mergeby * Fix docs syntax O(_input) * Update docs. 
* Update plugins/filter/lists_mergeby.py Co-authored-by: Felix Fontein * Update plugins/filter/lists_mergeby.py Co-authored-by: Felix Fontein * Update plugins/filter/lists_mergeby.py Co-authored-by: Felix Fontein * Update plugins/filter/lists_mergeby.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/filter/lists_mergeby.py | 212 +++++++++++++++++++++++--------- 1 file changed, 153 insertions(+), 59 deletions(-) diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index caf183492c..0e47d50172 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2020-2022, Vladimir Botka +# Copyright (c) 2020-2024, Vladimir Botka # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -12,22 +12,32 @@ DOCUMENTATION = ''' version_added: 2.0.0 author: Vladimir Botka (@vbotka) description: - - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters O(recursive) and O(list_merge) see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". + - Merge two or more lists by attribute O(index). Optional + parameters O(recursive) and O(list_merge) control the merging of + the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and + O(list_merge) see Ansible User's Guide chapter "Using filters to + manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the + filter P(ansible.builtin.combine#filter). 
+ positional: another_list, index options: _input: - description: A list of dictionaries. + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) + because all elements of O(_input) can be either dictionaries + or lists. type: list - elements: dictionary + elements: raw required: true another_list: - description: Another list of dictionaries. This parameter can be specified multiple times. + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. type: list - elements: dictionary + elements: raw index: description: - The dictionary key that must be present in every dictionary in every list that is used to @@ -55,40 +65,134 @@ DOCUMENTATION = ''' ''' EXAMPLES = ''' -- name: Merge two lists +# Some results below are manually formatted for better readability. The +# dictionaries' keys will be sorted alphabetically in real output. + +- name: Example 1. Merge two lists. The results r1 and r2 are the same. 
ansible.builtin.debug: - msg: >- - {{ list1 | community.general.lists_mergeby( - list2, - 'index', - recursive=True, - list_merge='append' - ) }}" + msg: | + r1: {{ r1 }} + r2: {{ r2 }} vars: list1: - - index: a - value: 123 - - index: b - value: 42 + - {index: a, value: 123} + - {index: b, value: 4} list2: - - index: a - foo: bar - - index: c - foo: baz - # Produces the following list of dictionaries: - # { - # "index": "a", - # "foo": "bar", - # "value": 123 - # }, - # { - # "index": "b", - # "value": 42 - # }, - # { - # "index": "c", - # "foo": "baz" - # } + - {index: a, foo: bar} + - {index: c, foo: baz} + r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}" + r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r1: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# r2: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} + +- name: Example 2. Merge three lists + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + list3: + - {index: d, foo: qux} + r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 3. Merge single list. The result is the same as 2. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + - {index: a, foo: bar} + - {index: c, foo: baz} + - {index: d, foo: qux} + r: "{{ [list1, []] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 4. Merge two lists. By default, replace nested lists. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: [Y1, Y2]} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 5. Merge two lists. Append nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}" + +# r: +# - {index: a, foo: [X1, X2, Y1, Y2]} +# - {index: b, foo: [X1, X2, Y1, Y2]} + +- name: Example 6. Merge two lists. By default, do not merge nested dictionaries. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: {y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 7. Merge two lists. Merge nested dictionaries too. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}" + +# r: +# - {index: a, foo: {x:1, y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} ''' RETURN = ''' @@ -108,13 +212,14 @@ from operator import itemgetter def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used. - This function is used by the function lists_mergeby. + '''Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. 
This function is used by the + function lists_mergeby. ''' d = defaultdict(dict) - for l in (x, y): - for elem in l: + for lst in (x, y): + for elem in lst: if not isinstance(elem, Mapping): msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) @@ -124,20 +229,9 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'): def lists_mergeby(*terms, **kwargs): - ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - - Example: - - debug: - msg: "{{ list1| - community.general.lists_mergeby(list2, - 'index', - recursive=True, - list_merge='append')| - list }}" + '''Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. ''' recursive = kwargs.pop('recursive', False) @@ -155,7 +249,7 @@ def lists_mergeby(*terms, **kwargs): "must be lists. %s is %s") raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: - if all(isinstance(l, Sequence) for l in sublist): + if all(isinstance(lst, Sequence) for lst in sublist): for item in sublist: flat_list.append(item) else: From f0940d82dc53f843e598d073a7ed102f8e50e628 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 Jun 2024 21:54:42 +0200 Subject: [PATCH 121/482] homectl, udm_user: guard crypt imports (#8497) Guard crypt import. 
--- changelogs/fragments/8497-crypt.yml | 3 +++ plugins/modules/homectl.py | 25 +++++++++++++++++++++++-- plugins/modules/udm_user.py | 26 ++++++++++++++++++++++++-- 3 files changed, 50 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8497-crypt.yml diff --git a/changelogs/fragments/8497-crypt.yml b/changelogs/fragments/8497-crypt.yml new file mode 100644 index 0000000000..f77f6c20f9 --- /dev/null +++ b/changelogs/fragments/8497-crypt.yml @@ -0,0 +1,3 @@ +known_issues: + - "homectl - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8497)." + - "udm_user - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8497)." diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index ca4c19a875..7751651c85 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -17,6 +17,12 @@ short_description: Manage user accounts with systemd-homed version_added: 4.4.0 description: - Manages a user's home directory managed by systemd-homed. +notes: + - This module does B(not) work with Python 3.13 or newer. It uses the deprecated L(crypt Python module, + https://docs.python.org/3.12/library/crypt.html) from the Python standard library, which was removed + from Python 3.13. 
+requirements: + - Python 3.12 or earlier extends_documentation_fragment: - community.general.attributes attributes: @@ -263,12 +269,21 @@ data: } ''' -import crypt import json -from ansible.module_utils.basic import AnsibleModule +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.basic import jsonify from ansible.module_utils.common.text.formatters import human_to_bytes +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + class Homectl(object): '''#TODO DOC STRINGS''' @@ -591,6 +606,12 @@ def main(): ] ) + if not HAS_CRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of Python 3.13 standard library)'), + exception=CRYPT_IMPORT_ERROR, + ) + homectl = Homectl(module) homectl.result['state'] = homectl.state diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index dcbf0ec85e..5a2e090497 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -20,6 +20,12 @@ description: - "This module allows to manage posix users on a univention corporate server (UCS). It uses the python API of the UCS to create a new object or edit it." +notes: + - This module does B(not) work with Python 3.13 or newer. It uses the deprecated L(crypt Python module, + https://docs.python.org/3.12/library/crypt.html) from the Python standard library, which was removed + from Python 3.13. 
+requirements: + - Python 3.12 or earlier extends_documentation_fragment: - community.general.attributes attributes: @@ -324,10 +330,10 @@ EXAMPLES = ''' RETURN = '''# ''' -import crypt from datetime import date, timedelta +import traceback -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible_collections.community.general.plugins.module_utils.univention_umc import ( umc_module_for_add, umc_module_for_edit, @@ -335,6 +341,15 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i base_dn, ) +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + def main(): expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") @@ -451,6 +466,13 @@ def main(): ('state', 'present', ['firstname', 'lastname', 'password']) ]) ) + + if not HAS_CRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of Python 3.13 standard library)'), + exception=CRYPT_IMPORT_ERROR, + ) + username = module.params['username'] position = module.params['position'] ou = module.params['ou'] From 49e2a8633e3448eeaef24a01a305ba4e3e4a7235 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 Jun 2024 22:37:33 +0200 Subject: [PATCH 122/482] Add Python 3.13 to CI (#8500) Add Python 3.13 to CI. 
--- .azure-pipelines/azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 754dfd0437..3f9293ac10 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -127,6 +127,7 @@ stages: - test: '3.10' - test: '3.11' - test: '3.12' + - test: '3.13' - stage: Units_2_17 displayName: Units 2.17 dependsOn: [] @@ -354,6 +355,7 @@ stages: targets: - test: '3.8' - test: '3.11' + - test: '3.13' - stage: Generic_2_17 displayName: Generic 2.17 dependsOn: [] From 71f9674835d4301a27f04131c20ede47d3bf7dd7 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 14 Jun 2024 17:46:56 +1200 Subject: [PATCH 123/482] cmd_runner mod util: improvements (#8479) * deprecate ignore_none in context * add changelog frag * raise deprecation notice when passing ignore_value_none to context * simplify deprecation logic --- .../fragments/8479-cmdrunner-improvements.yml | 4 +++ plugins/module_utils/cmd_runner.py | 26 ++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8479-cmdrunner-improvements.yml diff --git a/changelogs/fragments/8479-cmdrunner-improvements.yml b/changelogs/fragments/8479-cmdrunner-improvements.yml new file mode 100644 index 0000000000..075f5f5cd6 --- /dev/null +++ b/changelogs/fragments/8479-cmdrunner-improvements.yml @@ -0,0 +1,4 @@ +deprecated_features: + - CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). +minor_changes: + - CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index aab654f76f..8b4e76ba7c 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -89,12 +89,15 @@ class FormatError(CmdRunnerException): class _ArgFormat(object): + # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0 def __init__(self, func, ignore_none=None, ignore_missing_value=False): self.func = func self.ignore_none = ignore_none self.ignore_missing_value = ignore_missing_value + # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0 def __call__(self, value, ctx_ignore_none=True): + # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0 ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none if value is None and ignore_none: return [] @@ -227,7 +230,11 @@ class CmdRunner(object): self.default_args_order = self._prepare_args_order(default_args_order) if arg_formats is None: arg_formats = {} - self.arg_formats = dict(arg_formats) + self.arg_formats = {} + for fmt_name, fmt in arg_formats.items(): + if not isinstance(fmt, _ArgFormat): + fmt = _Format.as_func(func=fmt, ignore_none=True) + self.arg_formats[fmt_name] = fmt self.check_rc = check_rc self.force_lang = force_lang self.path_prefix = path_prefix @@ -246,7 +253,16 @@ class CmdRunner(object): def binary(self): return self.command[0] - def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs): + # remove parameter ignore_value_none in community.general 12.0.0 + def __call__(self, args_order=None, output_process=None, ignore_value_none=None, check_mode_skip=False, check_mode_return=None, **kwargs): + if ignore_value_none is None: + ignore_value_none = True + else: + self.module.deprecate( + "Using ignore_value_none when creating the runner context is now deprecated, " + "and the parameter will be removed 
in community.general 12.0.0. ", + version="12.0.0", collection_name="community.general" + ) if output_process is None: output_process = _process_as_is if args_order is None: @@ -258,7 +274,7 @@ class CmdRunner(object): return _CmdRunnerContext(runner=self, args_order=args_order, output_process=output_process, - ignore_value_none=ignore_value_none, + ignore_value_none=ignore_value_none, # DEPRECATION: remove in community.general 12.0.0 check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) @@ -274,6 +290,7 @@ class _CmdRunnerContext(object): self.runner = runner self.args_order = tuple(args_order) self.output_process = output_process + # DEPRECATION: parameter ignore_value_none at the context level is deprecated and will be removed in community.general 12.0.0 self.ignore_value_none = ignore_value_none self.check_mode_skip = check_mode_skip self.check_mode_return = check_mode_return @@ -313,6 +330,7 @@ class _CmdRunnerContext(object): value = named_args[arg_name] elif not runner.arg_formats[arg_name].ignore_missing_value: raise MissingArgumentValue(self.args_order, arg_name) + # DEPRECATION: remove parameter ctx_ignore_none in 12.0.0 self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none)) except MissingArgumentValue: raise @@ -329,7 +347,7 @@ class _CmdRunnerContext(object): @property def run_info(self): return dict( - ignore_value_none=self.ignore_value_none, + ignore_value_none=self.ignore_value_none, # DEPRECATION: remove in community.general 12.0.0 check_rc=self.check_rc, environ_update=self.environ_update, args_order=self.args_order, From 2574cb0dea23008be2cc158310523b00e72354d4 Mon Sep 17 00:00:00 2001 From: Jan Wenzel Date: Fri, 14 Jun 2024 07:47:28 +0200 Subject: [PATCH 124/482] feat: proxmox_vm_info - add network information for guests (#8471) * feat: add network information for guests - Uses agent information for qemu-vms - Uses network information for lxc container * chore: add changelog 
fragment * fix: change default, add doc * chore: clarify doc * chore: add optional , * chore: fix pep8 indentation warning * Update plugins/modules/proxmox_vm_info.py Co-authored-by: Felix Fontein * Update plugins/modules/proxmox_vm_info.py Co-authored-by: Felix Fontein --------- Co-authored-by: Jan Wenzel Co-authored-by: Felix Fontein --- .../8471-proxmox-vm-info-network.yml | 2 ++ plugins/modules/proxmox_vm_info.py | 33 ++++++++++++++----- 2 files changed, 26 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/8471-proxmox-vm-info-network.yml diff --git a/changelogs/fragments/8471-proxmox-vm-info-network.yml b/changelogs/fragments/8471-proxmox-vm-info-network.yml new file mode 100644 index 0000000000..f658b78831 --- /dev/null +++ b/changelogs/fragments/8471-proxmox-vm-info-network.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471). diff --git a/plugins/modules/proxmox_vm_info.py b/plugins/modules/proxmox_vm_info.py index 39d8307a43..e10b9dff6f 100644 --- a/plugins/modules/proxmox_vm_info.py +++ b/plugins/modules/proxmox_vm_info.py @@ -57,6 +57,13 @@ options: - pending default: none version_added: 8.1.0 + network: + description: + - Whether to retrieve the current network status. + - Requires enabled/running qemu-guest-agent on qemu VMs. 
+ type: bool + default: false + version_added: 9.1.0 extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation @@ -172,7 +179,7 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible): msg="Failed to retrieve VMs information from cluster resources: %s" % e ) - def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None): + def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None, network=False): # Leave in dict only machines that user wants to know about filtered_vms = { vm: info for vm, info in cluster_machines.items() if not ( @@ -201,17 +208,23 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible): config_type = 0 if config == "pending" else 1 # GET /nodes/{node}/qemu/{vmid}/config current=[0/1] desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type) + if network: + if type == "qemu": + desired_vm["network"] = call_vm_getter(this_vm_id).agent("network-get-interfaces").get()['result'] + elif type == "lxc": + desired_vm["network"] = call_vm_getter(this_vm_id).interfaces.get() + return filtered_vms - def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None): + def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): try: - return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config) + return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config, network) except Exception as e: self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e) - def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None): + def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): try: - return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config) + return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, 
node, config, network) except Exception as e: self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e) @@ -229,6 +242,7 @@ def main(): type="str", choices=["none", "current", "pending"], default="none", required=False ), + network=dict(type="bool", default=False, required=False), ) module_args.update(vm_info_args) @@ -245,6 +259,7 @@ def main(): vmid = module.params["vmid"] name = module.params["name"] config = module.params["config"] + network = module.params["network"] result = dict(changed=False) @@ -256,12 +271,12 @@ def main(): vms = {} if type == "lxc": - vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config) + vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network) elif type == "qemu": - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config) + vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) else: - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config) - vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config)) + vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) + vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network)) result["proxmox_vms"] = [info for vm, info in sorted(vms.items())] module.exit_json(**result) From 1d61541951a3ec3ecc5417bdd97ca2b1e9aca698 Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Fri, 14 Jun 2024 21:54:58 +0200 Subject: [PATCH 125/482] Feature filter replace_keys (#8446) * Add filter replace_keys. * Update examples and integration tests. * Fix examples and copyright. * Update documentation, examples and integration tests. * Implement #8445. Add filter replace_keys * Fix documentation formatting. * Fix documentation. * Fix type(target). Formatting improved. 
* Instead of a dictionary, _keys_filter_target_dict returns a list * No target testing in _keys_filter_params * Interface changed _keys_filter_params(data, matching_parameter) * If there are items with equal C(before) the B(first) one will be used. * Update remove_keys. Interface changed _keys_filter_params(data, matching_parameter) * The target can't be empty also in _keys_filter_target_dict * Update plugins/filter/replace_keys.py Co-authored-by: Felix Fontein * Update plugins/filter/replace_keys.py Co-authored-by: Felix Fontein * Update plugins/filter/replace_keys.py Co-authored-by: Felix Fontein * Test attributes before and after are strings in the iteration of target. * Update plugins/filter/replace_keys.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/filter/keep_keys.py | 2 +- plugins/filter/remove_keys.py | 2 +- plugins/filter/replace_keys.py | 180 ++++++++++++++++++ plugins/plugin_utils/keys_filter.py | 99 ++++++---- .../targets/filter_replace_keys/aliases | 5 + .../tasks/fn-test-replace_keys.yml | 21 ++ .../filter_replace_keys/tasks/main.yml | 7 + .../tasks/replace_keys.yml | 56 ++++++ .../targets/filter_replace_keys/vars/main.yml | 58 ++++++ 10 files changed, 397 insertions(+), 35 deletions(-) create mode 100644 plugins/filter/replace_keys.py create mode 100644 tests/integration/targets/filter_replace_keys/aliases create mode 100644 tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml create mode 100644 tests/integration/targets/filter_replace_keys/tasks/main.yml create mode 100644 tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml create mode 100644 tests/integration/targets/filter_replace_keys/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 4c6a98eaef..36d667706a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -174,6 +174,8 @@ files: $filters/random_mac.py: {} $filters/remove_keys.py: maintainers: vbotka + 
$filters/replace_keys.py: + maintainers: vbotka $filters/time.py: maintainers: resmo $filters/to_days.yml: diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py index 009e986ab2..dffccba356 100644 --- a/plugins/filter/keep_keys.py +++ b/plugins/filter/keep_keys.py @@ -110,7 +110,7 @@ def keep_keys(data, target=None, matching_parameter='equal'): """keep specific keys from dictionaries in a list""" # test parameters - _keys_filter_params(data, target, matching_parameter) + _keys_filter_params(data, matching_parameter) # test and transform target tt = _keys_filter_target_str(target, matching_parameter) diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py index 335f82d31f..cabce14682 100644 --- a/plugins/filter/remove_keys.py +++ b/plugins/filter/remove_keys.py @@ -110,7 +110,7 @@ def remove_keys(data, target=None, matching_parameter='equal'): """remove specific keys from dictionaries in a list""" # test parameters - _keys_filter_params(data, target, matching_parameter) + _keys_filter_params(data, matching_parameter) # test and transform target tt = _keys_filter_target_str(target, matching_parameter) diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py new file mode 100644 index 0000000000..d3b12c05d0 --- /dev/null +++ b/plugins/filter/replace_keys.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: replace_keys + short_description: Replace specific keys in a list of dictionaries + version_added: "9.1.0" + author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) + description: This filter replaces specified keys in a provided list of dictionaries. 
+ options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A list of dictionaries with attributes C(before) and C(after). + - The value of O(target[].after) replaces key matching O(target[].before). + type: list + elements: dictionary + required: true + suboptions: + before: + description: + - A key or key pattern to change. + - The interpretation of O(target[].before) depends on O(matching_parameter). + - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) will be used. + type: str + after: + description: A matching key change to. + type: str + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target[].before) items. + starts_with: Matches keys that start with one of the O(target[].before) items. + ends_with: Matches keys that end with one of the O(target[].before) items. + regex: Matches keys that match one of the regular expressions provided in O(target[].before). +''' + +EXAMPLES = ''' + l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default, replace keys that are equal any of the attributes before. + t: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t) }}" + + # 2) Replace keys that starts with any of the attributes before. + t: + - {before: k0, after: a0} + - {before: k1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Replace keys that ends with any of the attributes before. 
+ t: + - {before: x0, after: a0} + - {before: x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Replace keys that match any regex of the attributes before. + t: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-4 are all the same. + r: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + + # 5) If more keys match the same attribute before the last one will be used. + t: + - {before: "^.*_x.*$", after: X} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + + r: + - X: foo + - X: bar + + # 6) If there are items with equal attribute before the first one will be used. + t: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + + r: + - X: foo + - X: bar + + # 7) If there are more matches for a key the first one will be used. + l: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + t: + - {before: a, after: X} + - {before: aa, after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # gives + + r: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} +''' + +RETURN = ''' + _value: + description: The list of dictionaries with replaced keys. 
+ type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_dict) + + +def replace_keys(data, target=None, matching_parameter='equal'): + """replace specific keys in a list of dictionaries""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tz = _keys_filter_target_dict(target, matching_parameter) + + if matching_parameter == 'equal': + def replace_key(key): + for b, a in tz: + if key == b: + return a + return key + elif matching_parameter == 'starts_with': + def replace_key(key): + for b, a in tz: + if key.startswith(b): + return a + return key + elif matching_parameter == 'ends_with': + def replace_key(key): + for b, a in tz: + if key.endswith(b): + return a + return key + elif matching_parameter == 'regex': + def replace_key(key): + for b, a in tz: + if b.match(key): + return a + return key + + return [dict((replace_key(k), v) for k, v in d.items()) for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'replace_keys': replace_keys, + } diff --git a/plugins/plugin_utils/keys_filter.py b/plugins/plugin_utils/keys_filter.py index 37b7611c50..94234a15db 100644 --- a/plugins/plugin_utils/keys_filter.py +++ b/plugins/plugin_utils/keys_filter.py @@ -13,11 +13,10 @@ from ansible.module_utils.six import string_types from ansible.module_utils.common._collections_compat import Mapping, Sequence -def _keys_filter_params(data, target, matching_parameter): +def _keys_filter_params(data, matching_parameter): """test parameters: - * data must be a list of dictionaries. All keys must be strings. - * target must be a non-empty sequence. - * matching_parameter is member of a list. + * data must be a list of dictionaries. All keys must be strings. + * matching_parameter is member of a list. 
""" mp = matching_parameter @@ -37,30 +36,32 @@ def _keys_filter_params(data, target, matching_parameter): msg = "Top level keys must be strings. keys: %s" raise AnsibleFilterError(msg % elem.keys()) - if not isinstance(target, Sequence): - msg = ("The target must be a string or a list. target is %s.") - raise AnsibleFilterError(msg % target) - - if len(target) == 0: - msg = ("The target can't be empty.") - raise AnsibleFilterError(msg) - if mp not in ml: - msg = ("The matching_parameter must be one of %s. matching_parameter is %s") + msg = "The matching_parameter must be one of %s. matching_parameter=%s" raise AnsibleFilterError(msg % (ml, mp)) return def _keys_filter_target_str(target, matching_parameter): - """test: - * If target is list all items are strings - * If matching_parameter=regex target is a string or list with single string - convert and return: - * tuple of unique target items, or - * tuple with single item, or - * compiled regex if matching_parameter=regex """ + Test: + * target is a non-empty string or list. + * If target is list all items are strings. + * target is a string or list with single string if matching_parameter=regex. + Convert target and return: + * tuple of unique target items, or + * tuple with single item, or + * compiled regex if matching_parameter=regex. + """ + + if not isinstance(target, Sequence): + msg = "The target must be a string or a list. target is %s." + raise AnsibleFilterError(msg % type(target)) + + if len(target) == 0: + msg = "The target can't be empty." + raise AnsibleFilterError(msg) if isinstance(target, list): for elem in target: @@ -73,15 +74,14 @@ def _keys_filter_target_str(target, matching_parameter): r = target else: if len(target) > 1: - msg = ("Single item is required in the target list if matching_parameter is regex.") + msg = "Single item is required in the target list if matching_parameter=regex." 
raise AnsibleFilterError(msg) else: r = target[0] try: tt = re.compile(r) except re.error: - msg = ("The target must be a valid regex if matching_parameter is regex." - " target is %s") + msg = "The target must be a valid regex if matching_parameter=regex. target is %s" raise AnsibleFilterError(msg % r) elif isinstance(target, string_types): tt = (target, ) @@ -92,17 +92,50 @@ def _keys_filter_target_str(target, matching_parameter): def _keys_filter_target_dict(target, matching_parameter): - """test: - * target is a list of dictionaries - * ... + """ + Test: + * target is a list of dictionaries with attributes 'after' and 'before'. + * Attributes 'before' must be valid regex if matching_parameter=regex. + * Otherwise, the attributes 'before' must be strings. + Convert target and return: + * iterator that aggregates attributes 'before' and 'after', or + * iterator that aggregates compiled regex of attributes 'before' and 'after' if matching_parameter=regex. """ - # TODO: Complete and use this in filter replace_keys + if not isinstance(target, list): + msg = "The target must be a list. target is %s." + raise AnsibleFilterError(msg % (target, type(target))) - if isinstance(target, list): - for elem in target: - if not isinstance(elem, Mapping): - msg = "The target items must be dictionary. %s is %s" - raise AnsibleFilterError(msg % (elem, type(elem))) + if len(target) == 0: + msg = "The target can't be empty." + raise AnsibleFilterError(msg) - return + for elem in target: + if not isinstance(elem, Mapping): + msg = "The target items must be dictionaries. %s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + if not all(k in elem for k in ('before', 'after')): + msg = "All dictionaries in target must include attributes: after, before." + raise AnsibleFilterError(msg) + if not isinstance(elem['before'], string_types): + msg = "The attributes before must be strings. 
%s is %s" + raise AnsibleFilterError(msg % (elem['before'], type(elem['before']))) + if not isinstance(elem['after'], string_types): + msg = "The attributes after must be strings. %s is %s" + raise AnsibleFilterError(msg % (elem['after'], type(elem['after']))) + + before = [d['before'] for d in target] + after = [d['after'] for d in target] + + if matching_parameter == 'regex': + try: + tr = map(re.compile, before) + tz = list(zip(tr, after)) + except re.error: + msg = ("The attributes before must be valid regex if matching_parameter=regex." + " Not all items are valid regex in: %s") + raise AnsibleFilterError(msg % before) + else: + tz = list(zip(before, after)) + + return tz diff --git a/tests/integration/targets/filter_replace_keys/aliases b/tests/integration/targets/filter_replace_keys/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml b/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml new file mode 100644 index 0000000000..e324376a5a --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml @@ -0,0 +1,21 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test replace keys + ansible.builtin.assert: + that: + - (rr | difference(item.result) | length) == 0 + success_msg: | + [OK] {{ item.label }} + result: + {{ rr | to_nice_yaml(indent=2) | indent(2) }} + fail_msg: | + [ERR] {{ item.label }} + result: + {{ rr | to_nice_yaml(indent=2) | indent(2) }} + 
quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ item.data | + community.general.replace_keys(target=item.target, matching_parameter=item.match) }}" diff --git a/tests/integration/targets/filter_replace_keys/tasks/main.yml b/tests/integration/targets/filter_replace_keys/tasks/main.yml new file mode 100644 index 0000000000..35addaf946 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test replace_keys + import_tasks: replace_keys.yml diff --git a/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml b/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml new file mode 100644 index 0000000000..a57921b81b --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml @@ -0,0 +1,56 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quiet_test | d(true) | bool + tags: ansible_version + +- name: Test replace keys equal (default) + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + vars: + rr: "{{ list1 | community.general.replace_keys(target=tt) }}" + tt: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + tags: equal_default + +- name: Test replace keys targets1 + ansible.builtin.assert: + that: + - (rr | difference(result1) | length) == 0 + success_msg: | + [OK] result: + {{ rr | to_yaml }} + fail_msg: | + [ERR] result: + {{ 
rr | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ targets1 | dict2items }}" + loop_control: + label: "{{ item.key }}" + vars: + rr: "{{ list1 | community.general.replace_keys(target=item.value, matching_parameter=item.key) }}" + tags: targets1 + +- name: Test replace keys targets2 + include_tasks: + file: fn-test-replace_keys.yml + apply: + tags: targets2 + loop: "{{ targets2 }}" + loop_control: + label: "{{ item.label }}" + tags: targets2 diff --git a/tests/integration/targets/filter_replace_keys/vars/main.yml b/tests/integration/targets/filter_replace_keys/vars/main.yml new file mode 100644 index 0000000000..167e083960 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/vars/main.yml @@ -0,0 +1,58 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +list1: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + +result1: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + +targets1: + equal: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + starts_with: + - {before: k0, after: a0} + - {before: k1, after: a1} + ends_with: + - {before: x0, after: a0} + - {before: x1, after: a1} + regex: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + +list2: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + +targets2: + - label: If more keys match the same attribute before the last one will be used. + match: regex + target: + - {before: "^.*_x.*$", after: X} + data: "{{ list1 }}" + result: + - X: foo + - X: bar + - label: If there are items with equal attribute before the first one will be used. 
+ match: regex + target: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + data: "{{ list1 }}" + result: + - X: foo + - X: bar + - label: If there are more matches for a key the first one will be used. + match: starts_with + target: + - {before: a, after: X} + - {before: aa, after: Y} + data: "{{ list2 }}" + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} From d95f4d68a376c66cb11a62599ad08681321804f2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 15 Jun 2024 21:43:26 +1200 Subject: [PATCH 126/482] fix version vardict was introduced (#8509) --- docs/docsite/rst/guide_vardict.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst index e870bf175c..f65b09055b 100644 --- a/docs/docsite/rst/guide_vardict.rst +++ b/docs/docsite/rst/guide_vardict.rst @@ -173,4 +173,4 @@ values of it. For that, you want to use: results["diff"] = vars.diff() module.exit_json(**results) -.. versionadded:: 6.1.0 +.. 
versionadded:: 7.1.0 From 03966624ba1e647238d8807a8da89615760d1068 Mon Sep 17 00:00:00 2001 From: Ilgmi Date: Sun, 16 Jun 2024 09:32:55 +0200 Subject: [PATCH 127/482] Consul implement agent service and check (#7989) * Implement agent service and check (#7987) * implement update of service and check * update tests update documentation * update documentation * add consul_agent_check/service to action_groups check if unique_identifier of name is in params to get object add suggested improvements * update sanity * fix sanity issues update documentation * fix naming * fix naming check if response_data has data * fix sanity extra-docs * add as ignore maintainer in BOTMETA.yml update version_added to 8.4 * fix sanity * add to maintainers * Update plugins/modules/consul_agent_check.py Co-authored-by: Felix Fontein * Update plugins/modules/consul_agent_check.py Co-authored-by: Felix Fontein * Update plugins/modules/consul_agent_check.py Co-authored-by: Felix Fontein * update version_added * if create and update return no object as result we read the object again * get_first_appearing_identifier check the params for the given identifier and return it to simplify id vs name * add unique_identifiers as a new property and a method to decide which identifier should be used * fix sanity * add self to team consul remove params with no values add operational_attributes that inherited classes can set them get identifier value from object * fix sanity fix test * remove the possibility to add checks with consul_agent_check. check if service has changed * remove tests for idempotency check because for checks it is not possible * remove unique_identifier from consul.py change unique_identifier to unique_identifiers * get id from params * Revert "remove unique_identifier from consul.py" This reverts commit a4f0d0220dd23e95871914b152c25ff352097a2c. 
* update version to 8.5 * Revert "Revert "remove unique_identifier from consul.py"" This reverts commit d2c35cf04c8aaf5f0175d772f862a796e22e35d4. * update description update test * fix sanity tests * fix sanity tests * update documentation for agent_check * fix line length * add documentation * fix sanity * simplified check for Tcp Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * check duration with regex * fix * update documentation --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .github/BOTMETA.yml | 2 +- meta/runtime.yml | 2 + plugins/module_utils/consul.py | 67 +++- plugins/modules/consul_agent_check.py | 254 +++++++++++++++ plugins/modules/consul_agent_service.py | 289 ++++++++++++++++++ plugins/modules/consul_auth_method.py | 2 +- plugins/modules/consul_binding_rule.py | 2 +- plugins/modules/consul_policy.py | 2 +- plugins/modules/consul_role.py | 2 +- plugins/modules/consul_token.py | 4 +- .../consul/tasks/consul_agent_check.yml | 114 +++++++ .../consul/tasks/consul_agent_service.yml | 89 ++++++ .../integration/targets/consul/tasks/main.yml | 2 + 13 files changed, 810 insertions(+), 21 deletions(-) create mode 100644 plugins/modules/consul_agent_check.py create mode 100644 plugins/modules/consul_agent_service.py create mode 100644 tests/integration/targets/consul/tasks/consul_agent_check.yml create mode 100644 tests/integration/targets/consul/tasks/consul_agent_service.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 36d667706a..faedb42605 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1501,7 +1501,7 @@ macros: team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: sgargan apollo13 + team_consul: sgargan apollo13 Ilgmi team_cyberark_conjur: 
jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers diff --git a/meta/runtime.yml b/meta/runtime.yml index edeb53005f..4f5007b4a4 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -6,6 +6,8 @@ requires_ansible: '>=2.13.0' action_groups: consul: + - consul_agent_check + - consul_agent_service - consul_auth_method - consul_binding_rule - consul_policy diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py index 68c1a130b4..cd54a105f8 100644 --- a/plugins/module_utils/consul.py +++ b/plugins/module_utils/consul.py @@ -10,6 +10,7 @@ __metaclass__ = type import copy import json +import re from ansible.module_utils.six.moves.urllib import error as urllib_error from ansible.module_utils.six.moves.urllib.parse import urlencode @@ -68,6 +69,25 @@ def camel_case_key(key): return "".join(parts) +def validate_check(check): + validate_duration_keys = ['Interval', 'Ttl', 'Timeout'] + validate_tcp_regex = r"(?P.*):(?P(?:[0-9]+))$" + if check.get('Tcp') is not None: + match = re.match(validate_tcp_regex, check['Tcp']) + if not match: + raise Exception('tcp check must be in host:port format') + for duration in validate_duration_keys: + if duration in check and check[duration] is not None: + check[duration] = validate_duration(check[duration]) + + +def validate_duration(duration): + if duration: + if not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration): + duration = "{0}s".format(duration) + return duration + + STATE_PARAMETER = "state" STATE_PRESENT = "present" STATE_ABSENT = "absent" @@ -81,7 +101,7 @@ OPERATION_DELETE = "remove" def _normalize_params(params, arg_spec): final_params = {} for k, v in params.items(): - if k not in arg_spec: # Alias + if k not in arg_spec or v is None: # Alias continue spec = arg_spec[k] if ( @@ -105,9 +125,10 @@ class _ConsulModule: """ api_endpoint = None # type: str - unique_identifier = None # type: str + unique_identifiers = None # type: list result_key = None # type: str 
create_only_fields = set() + operational_attributes = set() params = {} def __init__(self, module): @@ -119,6 +140,8 @@ class _ConsulModule: if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC } + self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) + def execute(self): obj = self.read_object() @@ -203,14 +226,24 @@ class _ConsulModule: return False def prepare_object(self, existing, obj): - operational_attributes = {"CreateIndex", "CreateTime", "Hash", "ModifyIndex"} existing = { - k: v for k, v in existing.items() if k not in operational_attributes + k: v for k, v in existing.items() if k not in self.operational_attributes } for k, v in obj.items(): existing[k] = v return existing + def id_from_obj(self, obj, camel_case=False): + def key_func(key): + return camel_case_key(key) if camel_case else key + + if self.unique_identifiers: + for identifier in self.unique_identifiers: + identifier = key_func(identifier) + if identifier in obj: + return obj[identifier] + return None + def endpoint_url(self, operation, identifier=None): if operation == OPERATION_CREATE: return self.api_endpoint @@ -219,7 +252,8 @@ class _ConsulModule: raise RuntimeError("invalid arguments passed") def read_object(self): - url = self.endpoint_url(OPERATION_READ, self.params.get(self.unique_identifier)) + identifier = self.id_from_obj(self.params) + url = self.endpoint_url(OPERATION_READ, identifier) try: return self.get(url) except RequestError as e: @@ -233,25 +267,28 @@ class _ConsulModule: if self._module.check_mode: return obj else: - return self.put(self.api_endpoint, data=self.prepare_object({}, obj)) + url = self.endpoint_url(OPERATION_CREATE) + created_obj = self.put(url, data=self.prepare_object({}, obj)) + if created_obj is None: + created_obj = self.read_object() + return created_obj def update_object(self, existing, obj): - url = self.endpoint_url( - OPERATION_UPDATE, existing.get(camel_case_key(self.unique_identifier)) - ) 
merged_object = self.prepare_object(existing, obj) if self._module.check_mode: return merged_object else: - return self.put(url, data=merged_object) + url = self.endpoint_url(OPERATION_UPDATE, self.id_from_obj(existing, camel_case=True)) + updated_obj = self.put(url, data=merged_object) + if updated_obj is None: + updated_obj = self.read_object() + return updated_obj def delete_object(self, obj): if self._module.check_mode: return {} else: - url = self.endpoint_url( - OPERATION_DELETE, obj.get(camel_case_key(self.unique_identifier)) - ) + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) return self.delete(url) def _request(self, method, url_parts, data=None, params=None): @@ -309,7 +346,9 @@ class _ConsulModule: if 400 <= status < 600: raise RequestError(status, response_data) - return json.loads(response_data) + if response_data: + return json.loads(response_data) + return None def get(self, url_parts, **kwargs): return self._request("GET", url_parts, **kwargs) diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py new file mode 100644 index 0000000000..3739260049 --- /dev/null +++ b/plugins/modules/consul_agent_check.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: consul_agent_check +short_description: Add, modify, and delete checks within a consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of checks in a consul + cluster via the agent. For more details on using and configuring Checks, + see U(https://developer.hashicorp.com/consul/api-docs/agent/check). 
+ - Currently, there is no complete way to retrieve the script, interval or TTL + metadata for a registered check. Without this metadata it is not possible to + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report that a + change occurred. An API method is planned to supply this metadata so at that + stage change management will be added. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + details: + - The result is the object as it is defined in the module options and not the object structure of the consul API. + For a better overview of what the object structure looks like, + take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks). + diff_mode: + support: partial + details: + - In check mode the diff will show the object as it is defined in the module options and not the object structure of the consul API. +options: + state: + description: + - Whether the check should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Required name for the service check. + type: str + id: + description: + - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary to provide + an ID for uniqueness. This value will return in the response as "CheckId". + type: str + interval: + description: + - The interval at which the service check will be run. + This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). + - Required if one of the parameters O(args), O(http), or O(tcp) is specified.
+ type: str + notes: + description: + - Notes to attach to check when registering it. + type: str + args: + description: + - Specifies command arguments to run to update the status of the check. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + type: list + elements: str + ttl: + description: + - Checks can be registered with a TTL instead of a O(args) and O(interval) + this means that the service will check in with the agent before the + TTL expires. If it doesn't the check will be considered failed. + Required if registering a check and the script an interval are missing + Similar to the interval this is a number with a V(s) or V(m) suffix to + signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). + - Mutually exclusive with O(args), O(tcp) and O(http). + type: str + tcp: + description: + - Checks can be registered with a TCP port. This means that consul + will check if the connection attempt to that port is successful (that is, the port is currently accepting connections). + The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(http). + type: str + version_added: '1.3.0' + http: + description: + - Checks can be registered with an HTTP endpoint. This means that consul + will check that the http endpoint returns a successful HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(tcp). + type: str + timeout: + description: + - A custom HTTP check timeout. The consul default is 10 seconds. + Similar to the interval this is a number with a V(s) or V(m) suffix to + signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). 
+ type: str + service_id: + description: + - The ID for the service, must be unique per node. If O(state=absent), + defaults to the service name if supplied. + type: str +''' + +EXAMPLES = ''' +- name: Register tcp check for service 'nginx' + community.general.consul_agent_check: + name: nginx_tcp_check + service_id: nginx + interval: 60s + tcp: localhost:80 + notes: "Nginx Check" + +- name: Register http check for service 'nginx' + community.general.consul_agent_check: + name: nginx_http_check + service_id: nginx + interval: 60s + http: http://localhost:80/status + notes: "Nginx Check" + +- name: Remove check for service 'nginx' + community.general.consul_agent_check: + state: absent + id: nginx_http_check + service_id: "{{ nginx_service.ID }}" +''' + +RETURN = """ +check: + description: The check as returned by the consul HTTP API. + returned: always + type: dict + sample: + CheckID: nginx_check + ServiceID: nginx + Interval: 30s + Type: http + Notes: Nginx Check +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + OPERATION_READ, + _ConsulModule, + validate_check, +) + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "interval": dict(type='str'), + "notes": dict(type='str'), + "args": dict(type='list', elements='str'), + "http": dict(type='str'), + "tcp": dict(type='str'), + "ttl": dict(type='str'), + "timeout": dict(type='str'), + "service_id": dict(type='str'), +} + +_MUTUALLY_EXCLUSIVE = [ + ('args', 'ttl', 'tcp', 'http'), +] + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentCheckModule(_ConsulModule): + api_endpoint = "agent/check" + result_key = "check" + unique_identifiers = ["id", "name"] + operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", + "Status", "Type", "ExposedPort", "Definition"} + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return "agent/checks" + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier) + + def read_object(self): + url = self.endpoint_url(OPERATION_READ) + checks = self.get(url) + identifier = self.id_from_obj(self.params) + if identifier in checks: + return checks[identifier] + return None + + def prepare_object(self, existing, obj): + existing = 
super(ConsulAgentCheckModule, self).prepare_object(existing, obj) + validate_check(existing) + return existing + + def delete_object(self, obj): + if not self._module.check_mode: + self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID"))) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + mutually_exclusive=_MUTUALLY_EXCLUSIVE, + required_if=_REQUIRED_IF, + required_by=_REQUIRED_BY, + supports_check_mode=True, + ) + + consul_module = ConsulAgentCheckModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py new file mode 100644 index 0000000000..a8ef098970 --- /dev/null +++ b/plugins/modules/consul_agent_service.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: consul_agent_service +short_description: Add, modify and delete services within a consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of services in a consul + cluster via the agent. + - There are currently no plans to create services and checks in one. + This is because the Consul API does not provide checks for a service and + the checks themselves do not match the module parameters. + Therefore, only a service without checks can be created in this module. 
+author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff will miss operational attributes. +options: + state: + description: + - Whether the service should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for the service on a node, must be unique per node, + required if registering a service. + type: str + id: + description: + - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not provided. + If O(state=absent), defaults to the service name if supplied. + type: str + tags: + description: + - Tags that will be attached to the service registration. + type: list + elements: str + address: + description: + - The address to advertise that the service will be listening on. + This value will be passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API + documentation for further details. + type: str + meta: + description: + - Optional meta data used for filtering. + For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. + Not allowed characters are replaced with underscores. + type: dict + service_port: + description: + - The port on which the service is listening. Can optionally be supplied for + registration of a service, that is if O(name) or O(id) is set. + type: int + enable_tag_override: + description: + - Specifies to disable the anti-entropy feature for this service's tags. + If EnableTagOverride is set to true then external agents can update this service in the catalog and modify the tags. 
+ type: bool + default: False + weights: + description: + - Specifies weights for the service + type: dict + suboptions: + passing: + description: + - Weights for passing. + type: int + default: 1 + warning: + description: + - Weights for warning. + type: int + default: 1 + default: {"passing": 1, "warning": 1} +''' + +EXAMPLES = ''' +- name: Register nginx service with the local consul agent + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with a tcp check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with an http check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register external service nginx available at 10.1.5.23 + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + address: 10.1.5.23 + +- name: Register nginx with some service tags + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + tags: + - prod + - webservers + +- name: Register nginx with some service meta + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + meta: + nginx_version: 1.25.3 + +- name: Remove nginx service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + service_id: nginx + state: absent + +- name: Register celery worker service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: celery-worker + tags: + - prod + - worker +''' + +RETURN = """ +service: + description: The service as returned by the consul HTTP API. 
+ returned: always + type: dict + sample: + ID: nginx + Service: nginx + Address: localhost + Port: 80 + Tags: + - http + Meta: + - nginx_version: 1.23.3 + Datacenter: dc1 + Weights: + Passing: 1 + Warning: 1 + ContentHash: 61a245cd985261ac + EnableTagOverride: false +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + _ConsulModule +) + +_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "tags": dict(type='list', elements='str'), + "address": dict(type='str'), + "meta": dict(type='dict'), + "service_port": dict(type='int'), + "enable_tag_override": dict(type='bool', default=False), + "weights": dict(type='dict', options=dict( + passing=dict(type='int', default=1, no_log=False), + warning=dict(type='int', default=1) + ), default={"passing": 1, "warning": 1}) +} + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentServiceModule(_ConsulModule): + api_endpoint = "agent/service" + result_key = "service" + unique_identifiers = ["id", "name"] + operational_attributes = {"Service", "ContentHash", "Datacenter"} + + def endpoint_url(self, operation, identifier=None): + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentServiceModule, self).endpoint_url(operation, 
identifier) + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj) + if "ServicePort" in existing: + existing["Port"] = existing.pop("ServicePort") + + if "ID" not in existing: + existing["ID"] = existing["Name"] + + return existing + + def needs_update(self, api_obj, module_obj): + obj = {} + if "Service" in api_obj: + obj["Service"] = api_obj["Service"] + api_obj = self.prepare_object(api_obj, obj) + + if "Name" in module_obj: + module_obj["Service"] = module_obj.pop("Name") + if "ServicePort" in module_obj: + module_obj["Port"] = module_obj.pop("ServicePort") + + return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj) + + def delete_object(self, obj): + if not self._module.check_mode: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + self.put(url) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=_REQUIRED_IF, + supports_check_mode=True, + ) + + consul_module = ConsulAgentServiceModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py index afe549f6ef..e28474c313 100644 --- a/plugins/modules/consul_auth_method.py +++ b/plugins/modules/consul_auth_method.py @@ -168,7 +168,7 @@ def normalize_ttl(ttl): class ConsulAuthMethodModule(_ConsulModule): api_endpoint = "acl/auth-method" result_key = "auth_method" - unique_identifier = "name" + unique_identifiers = ["name"] def map_param(self, k, v, is_update): if k == "config" and v: diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py index 88496f8675..6a2882cee2 100644 --- a/plugins/modules/consul_binding_rule.py +++ b/plugins/modules/consul_binding_rule.py @@ -124,7 +124,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import ( class ConsulBindingRuleModule(_ConsulModule): 
api_endpoint = "acl/binding-rule" result_key = "binding_rule" - unique_identifier = "id" + unique_identifiers = ["id"] def read_object(self): url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"]) diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py index 2ed6021b03..36139ac097 100644 --- a/plugins/modules/consul_policy.py +++ b/plugins/modules/consul_policy.py @@ -145,7 +145,7 @@ _ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) class ConsulPolicyModule(_ConsulModule): api_endpoint = "acl/policy" result_key = "policy" - unique_identifier = "id" + unique_identifiers = ["id"] def endpoint_url(self, operation, identifier=None): if operation == OPERATION_READ: diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py index e07e2036fe..d6c4e4dd92 100644 --- a/plugins/modules/consul_role.py +++ b/plugins/modules/consul_role.py @@ -212,7 +212,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import ( class ConsulRoleModule(_ConsulModule): api_endpoint = "acl/role" result_key = "role" - unique_identifier = "id" + unique_identifiers = ["id"] def endpoint_url(self, operation, identifier=None): if operation == OPERATION_READ: diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py index 02bc544da7..c8bc8bc279 100644 --- a/plugins/modules/consul_token.py +++ b/plugins/modules/consul_token.py @@ -235,13 +235,13 @@ def normalize_link_obj(api_obj, module_obj, key): class ConsulTokenModule(_ConsulModule): api_endpoint = "acl/token" result_key = "token" - unique_identifier = "accessor_id" + unique_identifiers = ["accessor_id"] create_only_fields = {"expiration_ttl"} def read_object(self): # if `accessor_id` is not supplied we can only create objects and are not idempotent - if not self.params.get(self.unique_identifier): + if not self.id_from_obj(self.params): return None return super(ConsulTokenModule, self).read_object() diff --git 
a/tests/integration/targets/consul/tasks/consul_agent_check.yml b/tests/integration/targets/consul/tasks/consul_agent_check.yml new file mode 100644 index 0000000000..e1229c794f --- /dev/null +++ b/tests/integration/targets/consul/tasks/consul_agent_check.yml @@ -0,0 +1,114 @@ +--- +# Copyright (c) 2024, Michael Ilg (@Ilgmi) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create a service + community.general.consul_agent_service: + name: nginx + service_port: 80 + address: localhost + tags: + - http + meta: + nginx_version: 1.25.3 + register: result + +- set_fact: + nginx_service: "{{result.service}}" + +- assert: + that: + - result is changed + - result.service.ID is defined + +- name: Add a check for service + community.general.consul_agent_check: + name: nginx_check + id: nginx_check + interval: 30s + http: http://localhost:80/morestatus + notes: "Nginx Check" + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.CheckID == 'nginx_check' + - result.check.ServiceID == 'nginx' + - result.check.Interval == '30s' + - result.check.Type == 'http' + - result.check.Notes == 'Nginx Check' + +- set_fact: + nginx_service_check: "{{ result.check }}" + +- name: Update check for service + community.general.consul_agent_check: + name: "{{ nginx_service_check.Name }}" + id: "{{ nginx_service_check.CheckID }}" + interval: 60s + http: http://localhost:80/morestatus + notes: "New Nginx Check" + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.CheckID == 'nginx_check' + - result.check.ServiceID == 'nginx' + - result.check.Interval == '1m0s' + - result.check.Type == 'http' + - result.check.Notes == 'New Nginx Check' + +- name: Remove check + 
community.general.consul_agent_check: + id: "{{ nginx_service_check.Name }}" + state: absent + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result is not failed + - result.operation == 'remove' + +- name: Add a check + community.general.consul_agent_check: + name: check + id: check + interval: 30s + tcp: localhost:80 + notes: "check" + register: result + +- assert: + that: + - result is changed + - result.check is defined + +- name: Update a check + community.general.consul_agent_check: + name: check + id: check + interval: 60s + tcp: localhost:80 + notes: "check" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.Interval == '1m0s' + +- name: Remove check + community.general.consul_agent_check: + id: check + state: absent + register: result \ No newline at end of file diff --git a/tests/integration/targets/consul/tasks/consul_agent_service.yml b/tests/integration/targets/consul/tasks/consul_agent_service.yml new file mode 100644 index 0000000000..95270f74b3 --- /dev/null +++ b/tests/integration/targets/consul/tasks/consul_agent_service.yml @@ -0,0 +1,89 @@ +--- +# Copyright (c) 2024, Michael Ilg (@Ilgmi) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create a service + community.general.consul_agent_service: + name: nginx + service_port: 80 + address: localhost + tags: + - http + meta: + nginx_version: 1.25.3 + register: result + +- set_fact: + nginx_service: "{{result.service}}" + +- assert: + that: + - result is changed + - result.service.ID is defined + - result.service.Service == 'nginx' + - result.service.Address == 'localhost' + - result.service.Port == 80 + - result.service.Tags[0] == 'http' + - result.service.Meta.nginx_version is defined + - result.service.Meta.nginx_version == '1.25.3' + - result.service.ContentHash is 
defined + +- name: Update service + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + name: "{{ nginx_service.Service }}" + service_port: 8080 + address: 127.0.0.1 + tags: + - http + - new_tag + meta: + nginx_version: 1.0.0 + nginx: 1.25.3 + register: result +- assert: + that: + - result is changed + - result.service.ID is defined + - result.service.Service == 'nginx' + - result.service.Address == '127.0.0.1' + - result.service.Port == 8080 + - result.service.Tags[0] == 'http' + - result.service.Tags[1] == 'new_tag' + - result.service.Meta.nginx_version is defined + - result.service.Meta.nginx_version == '1.0.0' + - result.service.Meta.nginx is defined + - result.service.Meta.nginx == '1.25.3' + - result.service.ContentHash is defined + +- name: Update service not changed when updating again without changes + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + name: "{{ nginx_service.Service }}" + service_port: 8080 + address: 127.0.0.1 + tags: + - http + - new_tag + meta: + nginx_version: 1.0.0 + nginx: 1.25.3 + register: result + +- assert: + that: + - result is not changed + - result.operation is not defined + +- name: Remove service + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + state: absent + register: result + +- assert: + that: + - result is changed + - result is not failed + - result.operation == 'remove' \ No newline at end of file diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml index 6fef2b9980..0ac58fc40e 100644 --- a/tests/integration/targets/consul/tasks/main.yml +++ b/tests/integration/targets/consul/tasks/main.yml @@ -97,6 +97,8 @@ - import_tasks: consul_token.yml - import_tasks: consul_auth_method.yml - import_tasks: consul_binding_rule.yml + - import_tasks: consul_agent_service.yml + - import_tasks: consul_agent_check.yml module_defaults: group/community.general.consul: token: "{{ consul_management_token }}" From 
ec4cf55566478b7311be04a3a573c050487c72c3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:46:03 +1200 Subject: [PATCH 128/482] simplify cmd_runner_fmt.as_bool_not() (#8512) * simplify cmd_runner_fmt.as_bool_not() * add changelog frag --- changelogs/fragments/8512-as-bool-not.yml | 2 ++ plugins/module_utils/cmd_runner.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8512-as-bool-not.yml diff --git a/changelogs/fragments/8512-as-bool-not.yml b/changelogs/fragments/8512-as-bool-not.yml new file mode 100644 index 0000000000..f579c19810 --- /dev/null +++ b/changelogs/fragments/8512-as-bool-not.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512). diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 8b4e76ba7c..da4f1b6fc5 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -127,7 +127,7 @@ class _Format(object): @staticmethod def as_bool_not(args): - return _ArgFormat(lambda value: [] if value else _ensure_list(args), ignore_none=False) + return _Format.as_bool([], args, ignore_none=False) @staticmethod def as_optval(arg, ignore_none=None): From 3716187fc35cb9ea8a0722745124bf2d9399246a Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Sun, 16 Jun 2024 21:14:18 +0100 Subject: [PATCH 129/482] Fix OpenNebula inventory crash when NIC does not have IP (#8489) * Fix OpenNebula inventory crash when NIC does not have IP Match IPv6 behaviour. 
When a NIC does not have an IP: File "ansible/inventory/manager.py", line 292, in parse_source plugin.parse(self._inventory, self._loader, source, cache=cache) File "ansible-cm/plugins/inventory/opennebula.py", line 263, in parse self._populate() File "ansible-cm/plugins/inventory/opennebula.py", line 226, in _populate servers = self._retrieve_servers(filter_by_label) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "ansible-cm/plugins/inventory/opennebula.py", line 210, in _retrieve_servers server['v4_first_ip'] = self._get_vm_ipv4(vm) ^^^^^^^^^^^^^^^^^^^^^ File "ansible-cm/plugins/inventory/opennebula.py", line 154, in _get_vm_ipv4 return net['IP'] * Update to call to match IPv6 and add changelog fragment * Update changelog fragment. --------- Co-authored-by: Felix Fontein --- .../8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml | 2 ++ plugins/inventory/opennebula.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml diff --git a/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml b/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml new file mode 100644 index 0000000000..3db86f364e --- /dev/null +++ b/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml @@ -0,0 +1,2 @@ +bugfixes: + - opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489). 
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index b097307c39..bf81758ef1 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -143,7 +143,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): nic = [nic] for net in nic: - return net['IP'] + if net.get('IP'): + return net['IP'] return False From fd2cd5f28c7b2cc3b53f5366a94436a77f986dee Mon Sep 17 00:00:00 2001 From: Eike Waldt Date: Sun, 16 Jun 2024 22:14:31 +0200 Subject: [PATCH 130/482] keycloak_clientscope: add normalizations for attributes and protocol_mappers (#8496) Signed-off-by: Eike Waldt --- ...ycloak_clientscope-add-normalizations.yaml | 2 + plugins/modules/keycloak_clientscope.py | 38 ++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml diff --git a/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml b/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml new file mode 100644 index 0000000000..8af320cae0 --- /dev/null +++ b/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientscope - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496).
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index d24e0f1f27..b962b932c9 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -301,10 +301,37 @@ end_state: ''' from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError + keycloak_argument_spec, get_token, KeycloakError, is_struct_included from ansible.module_utils.basic import AnsibleModule +def normalise_cr(clientscoperep, remove_ids=False): + """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the + the change detection is more effective. + + :param clientscoperep: the clientscoperep dict to be sanitized + :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed + not alert when the ID's of objects are not usually known, (e.g. for protocol_mappers) + :return: normalised clientscoperep dict + """ + # Avoid the dict passed in to be modified + clientscoperep = clientscoperep.copy() + + if 'attributes' in clientscoperep: + clientscoperep['attributes'] = list(sorted(clientscoperep['attributes'])) + + if 'protocolMappers' in clientscoperep: + clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) + for mapper in clientscoperep['protocolMappers']: + if remove_ids: + mapper.pop('id', None) + + # Set to a default value. + mapper['consentRequired'] = mapper.get('consentRequired', False) + + return clientscoperep + + def sanitize_cr(clientscoperep): """ Removes probably sensitive details from a clientscoperep representation. 
@@ -317,7 +344,7 @@ def sanitize_cr(clientscoperep): if 'attributes' in result: if 'saml.signing.private.key' in result['attributes']: result['attributes']['saml.signing.private.key'] = 'no_log' - return result + return normalise_cr(result) def main(): @@ -458,6 +485,13 @@ def main(): result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) if module.check_mode: + # We can only compare the current clientscope with the proposed updates we have + before_norm = normalise_cr(before_clientscope, remove_ids=True) + desired_norm = normalise_cr(desired_clientscope, remove_ids=True) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = not is_struct_included(desired_norm, before_norm) module.exit_json(**result) # do the update From df7fe19bbe666f23e2bd247cb1476ca7961b616e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 17 Jun 2024 07:06:31 +0200 Subject: [PATCH 131/482] pacman: do not fail if there is nothing to do (#8514) Do not fail if there is nothing to do. --- changelogs/fragments/8514-pacman-empty.yml | 2 ++ plugins/modules/pacman.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8514-pacman-empty.yml diff --git a/changelogs/fragments/8514-pacman-empty.yml b/changelogs/fragments/8514-pacman-empty.yml new file mode 100644 index 0000000000..c51ba21acc --- /dev/null +++ b/changelogs/fragments/8514-pacman-empty.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514)." diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py index 7f67b91039..f13bde317c 100644 --- a/plugins/modules/pacman.py +++ b/plugins/modules/pacman.py @@ -367,8 +367,9 @@ class Pacman(object): self.install_packages(pkgs) self.success() - # This shouldn't happen...
- self.fail("This is a bug") + # This happens if an empty list has been provided for name + self.add_exit_infos(msg='Nothing to do') + self.success() def install_packages(self, pkgs): pkgs_to_install = [] From b11da288d2b49cb0a5a371fc33dec49a76ad8fbf Mon Sep 17 00:00:00 2001 From: desand01 Date: Mon, 17 Jun 2024 01:06:47 -0400 Subject: [PATCH 132/482] Keycloak set client authentification flows by name (#8428) * first commit * Add change logs * fix sanity * Sanity 2 * Test unset flows * Update plugins/modules/keycloak_client.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update plugins/modules/keycloak_client.py Co-authored-by: Felix Fontein * Update changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml Co-authored-by: Felix Fontein * Remove double traitement from "alias" * Update plugins/modules/keycloak_client.py Co-authored-by: Felix Fontein * Update plugins/modules/keycloak_client.py Co-authored-by: Felix Fontein --------- Co-authored-by: Andre Desrosiers Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- ...ign-auth-flow-by-name-keycloak-client.yaml | 2 + plugins/modules/keycloak_client.py | 111 ++++++++++++++- .../targets/keycloak_client/tasks/main.yml | 128 ++++++++++++++++++ 3 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml diff --git a/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml b/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml new file mode 100644 index 0000000000..d9bb9bc3ea --- /dev/null +++ b/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428). 
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index 3628e5a517..efaa66e26d 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -340,6 +340,42 @@ options: description: - Override realm authentication flow bindings. type: dict + suboptions: + browser: + description: + - Flow ID of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) + and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + type: str + + browser_name: + description: + - Flow name of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) + and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + aliases: + - browserName + type: str + version_added: 9.1.0 + + direct_grant: + description: + - Flow ID of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) + and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive. + aliases: + - directGrant + type: str + + direct_grant_name: + description: + - Flow name of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) + and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive. + aliases: + - directGrantName + type: str + version_added: 9.1.0 aliases: - authenticationFlowBindingOverrides version_added: 3.4.0 @@ -781,6 +817,64 @@ def sanitize_cr(clientrep): return normalise_cr(result) +def get_authentication_flow_id(flow_name, realm, kc): + """ Get the authentication flow ID based on the flow name, realm, and Keycloak client. + + Args: + flow_name (str): The name of the authentication flow. + realm (str): The name of the realm. + kc (KeycloakClient): The Keycloak client instance. + + Returns: + str: The ID of the authentication flow. 
+ + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. + """ + flow = kc.get_authentication_flow_by_alias(flow_name, realm) + if flow: + return flow["id"] + kc.module.fail_json(msg='Authentification flow %s not found in realm %s' % (flow_name, realm)) + + +def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc): + """ Convert a dictionary representing client flow bindings to a model representation. + + Args: + newClientFlowBinding (dict): A dictionary containing client flow bindings. + realm (str): The name of the realm. + kc (KeycloakClient): An instance of the KeycloakClient class. + + Returns: + dict: A dictionary representing the model flow bindings. The dictionary has two keys: + - "browser" (str or None): The ID of the browser authentication flow binding, or None if not provided. + - "direct_grant" (str or None): The ID of the direct grant authentication flow binding, or None if not provided. + + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. 
+ + """ + + modelFlow = { + "browser": None, + "direct_grant": None + } + + for k, v in newClientFlowBinding.items(): + if not v: + continue + if k == "browser": + modelFlow["browser"] = v + elif k == "browser_name": + modelFlow["browser"] = get_authentication_flow_id(v, realm, kc) + elif k == "direct_grant": + modelFlow["direct_grant"] = v + elif k == "direct_grant_name": + modelFlow["direct_grant"] = get_authentication_flow_id(v, realm, kc) + + return modelFlow + + def main(): """ Module execution @@ -799,6 +893,13 @@ def main(): config=dict(type='dict'), ) + authentication_flow_spec = dict( + browser=dict(type='str'), + browser_name=dict(type='str', aliases=['browserName']), + direct_grant=dict(type='str', aliases=['directGrant']), + direct_grant_name=dict(type='str', aliases=['directGrantName']), + ) + meta_args = dict( state=dict(default='present', choices=['present', 'absent']), realm=dict(type='str', default='master'), @@ -838,7 +939,13 @@ def main(): use_template_scope=dict(type='bool', aliases=['useTemplateScope']), use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']), - authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), + authentication_flow_binding_overrides=dict( + type='dict', + aliases=['authenticationFlowBindingOverrides'], + options=authentication_flow_spec, + required_one_of=[['browser', 'direct_grant', 'browser_name', 'direct_grant_name']], + mutually_exclusive=[['browser', 'browser_name'], ['direct_grant', 'direct_grant_name']], + ), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), authorization_settings=dict(type='dict', aliases=['authorizationSettings']), default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']), @@ -900,6 +1007,8 @@ def main(): # they are not specified if client_param == 
'protocol_mappers': new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + elif client_param == 'authentication_flow_binding_overrides': + new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc) changeset[camel(client_param)] = new_param_value diff --git a/tests/integration/targets/keycloak_client/tasks/main.yml b/tests/integration/targets/keycloak_client/tasks/main.yml index 5e7c7fae39..e1a7d2ebfb 100644 --- a/tests/integration/targets/keycloak_client/tasks/main.yml +++ b/tests/integration/targets/keycloak_client/tasks/main.yml @@ -103,3 +103,131 @@ assert: that: - check_client_when_present_and_changed is changed + +- name: Desire client with flow binding overrides + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser_name: browser + direct_grant_name: direct grant + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are set + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser | length > 0 + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant | length > 0 + +- name: Backup flow UUIDs + set_fact: + flow_browser_uuid: "{{ desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser }}" + flow_direct_grant_uuid: "{{ desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant 
}}" + +- name: Desire client with flow binding overrides remove direct_grant_name + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser_name: browser + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser | length > 0 + - "'direct_grant' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + +- name: Desire client with flow binding overrides remove browser add direct_grant + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + direct_grant_name: direct grant + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - "'browser' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + - 
desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant | length > 0 + +- name: Desire client with flow binding overrides with UUIDs + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser: "{{ flow_browser_uuid }}" + direct_grant: "{{ flow_direct_grant_uuid }}" + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser == flow_browser_uuid + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant == flow_direct_grant_uuid + +- name: Unset flow binding overrides + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser: "{{ None }}" + direct_grant: null + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are removed + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - "'browser' not in 
desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + - "'direct_grant' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" \ No newline at end of file From 3314d5c8dbf34cf005e692198aaa72051e44cb7a Mon Sep 17 00:00:00 2001 From: Bruno Travouillon Date: Mon, 17 Jun 2024 01:07:07 -0400 Subject: [PATCH 133/482] proxmox_kvm: document that force requires archive (#8503) * proxmox_kvm: document that force requires archive As per `qm(1)`, the force option requires `archive`. Add this information in the `proxmox_kvm` module so one will know they have to define `archive` when using `force`. * fix: parameter is an option O(archive) Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/proxmox_kvm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 9fe805c7ab..71cbb51fc1 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -174,6 +174,7 @@ options: - Allow to force stop VM. - Can be used with states V(stopped), V(restarted), and V(absent). - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). + - Requires parameter O(archive). type: bool format: description: From 69b72e4a8ea1111503d9528f179d5338657426cc Mon Sep 17 00:00:00 2001 From: Colin Nolan Date: Mon, 17 Jun 2024 06:15:31 +0100 Subject: [PATCH 134/482] `cargo` module install from source in a given directory (#8480) * Fixes installed version for git/local. * Support latest determination with local source. * Adds docs. * Improves error message. * Setup for tests. * Updates copyright. * Align closer to #7895. * Adds changelog. * Check directory exists. * Stop using format strings. * Corrects directory arg type in docs. * Setup test repo dynamically. * Adds tests. * Adds version matching tests. * Update changelog fragment to match PR ID. 
* Updates copyright. * Import new directory tests. --- .../8480-directory-feature-cargo.yml | 2 + plugins/modules/cargo.py | 62 ++++++++- .../integration/targets/cargo/tasks/main.yml | 1 + .../targets/cargo/tasks/test_directory.yml | 122 ++++++++++++++++++ 4 files changed, 183 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8480-directory-feature-cargo.yml create mode 100644 tests/integration/targets/cargo/tasks/test_directory.yml diff --git a/changelogs/fragments/8480-directory-feature-cargo.yml b/changelogs/fragments/8480-directory-feature-cargo.yml new file mode 100644 index 0000000000..8892e7c5dd --- /dev/null +++ b/changelogs/fragments/8480-directory-feature-cargo.yml @@ -0,0 +1,2 @@ +minor_changes: + - "cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480)." diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py index ba9c05ed7b..2fc729da20 100644 --- a/plugins/modules/cargo.py +++ b/plugins/modules/cargo.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2021 Radek Sprta +# Copyright (c) 2024 Colin Nolan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -65,6 +66,13 @@ options: type: str default: present choices: [ "present", "absent", "latest" ] + directory: + description: + - Path to the source directory to install the Rust package from. + - This is only used when installing packages. 
+ type: path + required: false + version_added: 9.1.0 requirements: - cargo installed """ @@ -98,8 +106,14 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi state: latest + +- name: Install "ludusavi" Rust package from source directory + community.general.cargo: + name: ludusavi + directory: /path/to/ludusavi/source """ +import json import os import re @@ -115,6 +129,7 @@ class Cargo(object): self.state = kwargs["state"] self.version = kwargs["version"] self.locked = kwargs["locked"] + self.directory = kwargs["directory"] @property def path(self): @@ -143,7 +158,7 @@ class Cargo(object): data, dummy = self._exec(cmd, True, False, False) - package_regex = re.compile(r"^([\w\-]+) v(.+):$") + package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$") installed = {} for line in data.splitlines(): package_info = package_regex.match(line) @@ -163,19 +178,53 @@ class Cargo(object): if self.version: cmd.append("--version") cmd.append(self.version) + if self.directory: + cmd.append("--path") + cmd.append(self.directory) return self._exec(cmd) def is_outdated(self, name): installed_version = self.get_installed().get(name) + latest_version = ( + self.get_latest_published_version(name) + if not self.directory + else self.get_source_directory_version(name) + ) + return installed_version != latest_version + def get_latest_published_version(self, name): cmd = ["search", name, "--limit", "1"] data, dummy = self._exec(cmd, True, False, False) match = re.search(r'"(.+)"', data) - if match: - latest_version = match.group(1) + if not match: + self.module.fail_json( + msg="No published version for package %s found" % name + ) + return match.group(1) - return installed_version != latest_version + def get_source_directory_version(self, name): + cmd = [ + "metadata", + "--format-version", + "1", + "--no-deps", + "--manifest-path", + os.path.join(self.directory, "Cargo.toml"), + ] + data, dummy = self._exec(cmd, True, False, False) + manifest = json.loads(data) + + package = next( + 
(package for package in manifest["packages"] if package["name"] == name), + None, + ) + if not package: + self.module.fail_json( + msg="Package %s not defined in source, found: %s" + % (name, [x["name"] for x in manifest["packages"]]) + ) + return package["version"] def uninstall(self, packages=None): cmd = ["uninstall"] @@ -191,16 +240,21 @@ def main(): state=dict(default="present", choices=["present", "absent", "latest"]), version=dict(default=None, type="str"), locked=dict(default=False, type="bool"), + directory=dict(default=None, type="path"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) name = module.params["name"] state = module.params["state"] version = module.params["version"] + directory = module.params["directory"] if not name: module.fail_json(msg="Package name must be specified") + if directory is not None and not os.path.isdir(directory): + module.fail_json(msg="Source directory does not exist") + # Set LANG env since we parse stdout module.run_command_environ_update = dict( LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" diff --git a/tests/integration/targets/cargo/tasks/main.yml b/tests/integration/targets/cargo/tasks/main.yml index 29f27c3fda..89f13960a6 100644 --- a/tests/integration/targets/cargo/tasks/main.yml +++ b/tests/integration/targets/cargo/tasks/main.yml @@ -16,6 +16,7 @@ - block: - import_tasks: test_general.yml - import_tasks: test_version.yml + - import_tasks: test_directory.yml environment: "{{ cargo_environment }}" when: has_cargo | default(false) - import_tasks: test_rustup_cargo.yml diff --git a/tests/integration/targets/cargo/tasks/test_directory.yml b/tests/integration/targets/cargo/tasks/test_directory.yml new file mode 100644 index 0000000000..f4275ede68 --- /dev/null +++ b/tests/integration/targets/cargo/tasks/test_directory.yml @@ -0,0 +1,122 @@ +--- +# Copyright (c) 2024 Colin Nolan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create temp directory + tempfile: + state: directory + register: temp_directory + +- name: Test block + vars: + manifest_path: "{{ temp_directory.path }}/Cargo.toml" + package_name: hello-world-directory-test + block: + - name: Initialize package + ansible.builtin.command: + cmd: "cargo init --name {{ package_name }}" + args: + chdir: "{{ temp_directory.path }}" + + - name: Set package version (1.0.0) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "1.0.0"' + + - name: Ensure package is uninstalled + community.general.cargo: + name: "{{ package_name }}" + state: absent + directory: "{{ temp_directory.path }}" + register: uninstall_absent + + - name: Install package + community.general.cargo: + name: "{{ package_name }}" + directory: "{{ temp_directory.path }}" + register: install_absent + + - name: Change package version (1.0.1) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "1.0.1"' + + - name: Install package again (present) + community.general.cargo: + name: "{{ package_name }}" + state: present + directory: "{{ temp_directory.path }}" + register: install_present_state + + - name: Install package again (latest) + community.general.cargo: + name: "{{ package_name }}" + state: latest + directory: "{{ temp_directory.path }}" + register: install_latest_state + + - name: Change package version (2.0.0) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "2.0.0"' + + - name: Install package with given version (matched) + community.general.cargo: + name: "{{ package_name }}" + version: "2.0.0" + directory: "{{ temp_directory.path }}" + register: install_given_version_matched + + - name: Install package with given version (unmatched) + community.general.cargo: + name: "{{ package_name }}" + version: 
"2.0.1" + directory: "{{ temp_directory.path }}" + register: install_given_version_unmatched + ignore_errors: true + + - name: Uninstall package + community.general.cargo: + name: "{{ package_name }}" + state: absent + directory: "{{ temp_directory.path }}" + register: uninstall_present + + - name: Install non-existant package + community.general.cargo: + name: "{{ package_name }}-non-existant" + state: present + directory: "{{ temp_directory.path }}" + register: install_non_existant + ignore_errors: true + + - name: Install non-existant source directory + community.general.cargo: + name: "{{ package_name }}" + state: present + directory: "{{ temp_directory.path }}/non-existant" + register: install_non_existant_source + ignore_errors: true + + always: + - name: Remove temp directory + file: + path: "{{ temp_directory.path }}" + state: absent + +- name: Check assertions + assert: + that: + - uninstall_absent is not changed + - install_absent is changed + - install_present_state is not changed + - install_latest_state is changed + - install_given_version_matched is changed + - install_given_version_unmatched is failed + - uninstall_present is changed + - install_non_existant is failed + - install_non_existant_source is failed From 2612ceee3722d25810ceb805726b9c2e517a0985 Mon Sep 17 00:00:00 2001 From: Strahinja Kustudic Date: Mon, 17 Jun 2024 07:45:43 +0200 Subject: [PATCH 135/482] Fix launchd check-mode to report changed correctly (#8476) * Fix launchd check-mode to report changed correctly * Update changelog fragment. 
--------- Co-authored-by: Strahinja Kustudic Co-authored-by: Felix Fontein --- changelogs/fragments/8476-launchd-check-mode-changed.yaml | 2 ++ plugins/modules/launchd.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8476-launchd-check-mode-changed.yaml diff --git a/changelogs/fragments/8476-launchd-check-mode-changed.yaml b/changelogs/fragments/8476-launchd-check-mode-changed.yaml new file mode 100644 index 0000000000..dc1e60de36 --- /dev/null +++ b/changelogs/fragments/8476-launchd-check-mode-changed.yaml @@ -0,0 +1,2 @@ +bugfixes: + - launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8476). diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py index e5942ea7cf..a6427bdb2f 100644 --- a/plugins/modules/launchd.py +++ b/plugins/modules/launchd.py @@ -514,7 +514,8 @@ def main(): result['status']['current_pid'] != result['status']['previous_pid']): result['changed'] = True if module.check_mode: - result['changed'] = True + if result['status']['current_state'] != action: + result['changed'] = True module.exit_json(**result) From 9a18963364b7870d845861ee200a5bcb6cce0e26 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 17 Jun 2024 07:56:19 +0200 Subject: [PATCH 136/482] Next expected release will be 9.2.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 5c1688ce8f..954334d918 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.1.0 +version: 9.2.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 86f19cb5d314aa0d52a14fa3dc4b16c427566352 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 18 Jun 2024 07:56:26 +0200 Subject: [PATCH 137/482] Update CI for ansible-core devel (#8534) * Update CI for ansible-core devel. * Uncomment platforms that cause problems.
--- .azure-pipelines/azure-pipelines.yml | 37 +++++++++++++++++++--------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 3f9293ac10..a505ede732 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -172,12 +172,15 @@ stages: parameters: testFormat: devel/{0} targets: - - name: Alpine 3.19 - test: alpine/3.19 - # - name: Fedora 39 - # test: fedora/39 - - name: Ubuntu 22.04 - test: ubuntu/22.04 + - name: Alpine 3.20 + test: alpine/3.20 + # - name: Fedora 40 + # test: fedora/40 + # TODO: + #- name: Ubuntu 22.04 + # test: ubuntu/22.04 + # - name: Ubuntu 24.04 + # test: ubuntu/24.04 groups: - vm - stage: Remote_devel @@ -190,8 +193,9 @@ stages: targets: - name: macOS 14.3 test: macos/14.3 - - name: RHEL 9.3 - test: rhel/9.3 + # TODO: + #- name: RHEL 9.4 + # test: rhel/9.4 - name: FreeBSD 14.0 test: freebsd/14.0 groups: @@ -208,6 +212,8 @@ stages: targets: - name: FreeBSD 13.3 test: freebsd/13.3 + - name: RHEL 9.3 + test: rhel/9.3 groups: - 1 - 2 @@ -264,12 +270,15 @@ stages: parameters: testFormat: devel/linux/{0} targets: - - name: Fedora 39 - test: fedora39 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 - name: Ubuntu 22.04 test: ubuntu2204 + # TODO: + #- name: Ubuntu 24.04 + # test: ubuntu2404 groups: - 1 - 2 @@ -282,8 +291,12 @@ stages: parameters: testFormat: 2.17/linux/{0} targets: + - name: Fedora 39 + test: fedora39 - name: Alpine 3.19 test: alpine319 + - name: Ubuntu 20.04 + test: ubuntu2004 groups: - 1 - 2 From ecb68aa5d273e92744542f5982b5014b13484139 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 18 Jun 2024 23:48:37 +0200 Subject: [PATCH 138/482] Finish updating CI (#8537) * Uncomment TODO entries. * Exclude some tests that fail or are known to fail. * Also run extra VM tests on Ubuntu 24.04. * Fix condition. * More adjustments. 
--- .azure-pipelines/azure-pipelines.yml | 19 ++++++++----------- .../integration/targets/django_manage/aliases | 1 + .../targets/ejabberd_user/tasks/main.yml | 6 ++++-- tests/integration/targets/homectl/aliases | 1 + .../targets/iptables_state/aliases | 2 ++ tests/integration/targets/iso_extract/aliases | 1 + tests/integration/targets/mqtt/tasks/main.yml | 2 +- tests/integration/targets/odbc/aliases | 1 + .../setup_java_keytool/vars/Ubuntu-24.yml | 8 ++++++++ .../vars/Ubuntu-24-py3.yml | 13 +++++++++++++ .../targets/setup_snap/tasks/D-RedHat-9.4.yml | 1 + tests/integration/targets/ufw/aliases | 1 + 12 files changed, 42 insertions(+), 14 deletions(-) create mode 100644 tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml create mode 100644 tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml create mode 120000 tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index a505ede732..aee822043f 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -176,11 +176,10 @@ stages: test: alpine/3.20 # - name: Fedora 40 # test: fedora/40 - # TODO: - #- name: Ubuntu 22.04 - # test: ubuntu/22.04 - # - name: Ubuntu 24.04 - # test: ubuntu/24.04 + - name: Ubuntu 22.04 + test: ubuntu/22.04 + - name: Ubuntu 24.04 + test: ubuntu/24.04 groups: - vm - stage: Remote_devel @@ -193,9 +192,8 @@ stages: targets: - name: macOS 14.3 test: macos/14.3 - # TODO: - #- name: RHEL 9.4 - # test: rhel/9.4 + - name: RHEL 9.4 + test: rhel/9.4 - name: FreeBSD 14.0 test: freebsd/14.0 groups: @@ -276,9 +274,8 @@ stages: test: alpine320 - name: Ubuntu 22.04 test: ubuntu2204 - # TODO: - #- name: Ubuntu 24.04 - # test: ubuntu2404 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index 9790549169..ae3c2623a0 100644 --- 
a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -18,3 +18,4 @@ skip/rhel9.0 skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 +skip/rhel9.4 diff --git a/tests/integration/targets/ejabberd_user/tasks/main.yml b/tests/integration/targets/ejabberd_user/tasks/main.yml index 349b3f952f..d7f1670d06 100644 --- a/tests/integration/targets/ejabberd_user/tasks/main.yml +++ b/tests/integration/targets/ejabberd_user/tasks/main.yml @@ -11,8 +11,10 @@ - name: Bail out if not supported ansible.builtin.meta: end_play # TODO: remove Archlinux from the list - when: ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora', 'Archlinux') - + # TODO: remove Ubuntu 24.04 (noble) from the list + when: > + ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora', 'Archlinux') + or (ansible_distribution == 'Ubuntu' and ansible_distribution_release in ['noble']) - name: Remove ejabberd ansible.builtin.package: diff --git a/tests/integration/targets/homectl/aliases b/tests/integration/targets/homectl/aliases index ea9b442302..a226b55851 100644 --- a/tests/integration/targets/homectl/aliases +++ b/tests/integration/targets/homectl/aliases @@ -11,3 +11,4 @@ skip/rhel9.0 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.1 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.2 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.3 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ +skip/rhel9.4 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 5a02a630bc..76c58041b6 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -10,3 +10,5 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no 
iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) + +skip/ubuntu22.04 # TODO there's a problem here! diff --git a/tests/integration/targets/iso_extract/aliases b/tests/integration/targets/iso_extract/aliases index 5ddca1ecbb..68cc0db486 100644 --- a/tests/integration/targets/iso_extract/aliases +++ b/tests/integration/targets/iso_extract/aliases @@ -11,6 +11,7 @@ skip/rhel9.0 # FIXME skip/rhel9.1 # FIXME skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME +skip/rhel9.4 # FIXME skip/freebsd12.4 # FIXME skip/freebsd13.2 # FIXME skip/freebsd13.3 # FIXME diff --git a/tests/integration/targets/mqtt/tasks/main.yml b/tests/integration/targets/mqtt/tasks/main.yml index 0beb1b3b27..3fd11643ee 100644 --- a/tests/integration/targets/mqtt/tasks/main.yml +++ b/tests/integration/targets/mqtt/tasks/main.yml @@ -11,4 +11,4 @@ - include_tasks: ubuntu.yml when: - ansible_distribution == 'Ubuntu' - - ansible_distribution_release not in ['focal', 'jammy'] + - ansible_distribution_release not in ['focal', 'jammy', 'noble'] diff --git a/tests/integration/targets/odbc/aliases b/tests/integration/targets/odbc/aliases index 91a6167251..ceb043895a 100644 --- a/tests/integration/targets/odbc/aliases +++ b/tests/integration/targets/odbc/aliases @@ -11,4 +11,5 @@ skip/rhel9.0 skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 +skip/rhel9.4 skip/freebsd diff --git a/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml b/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml new file mode 100644 index 0000000000..addf344fe2 --- /dev/null +++ b/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +keytool_package_names: + - ca-certificates-java + - openjdk-21-jre-headless diff --git 
a/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml b/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml new file mode 100644 index 0000000000..702bd9a5d1 --- /dev/null +++ b/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml @@ -0,0 +1,13 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/16/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/16/main" +pg_ver: 16 diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git a/tests/integration/targets/ufw/aliases b/tests/integration/targets/ufw/aliases index 209a1153e4..b1dbfd2eb1 100644 --- a/tests/integration/targets/ufw/aliases +++ b/tests/integration/targets/ufw/aliases @@ -13,6 +13,7 @@ skip/rhel9.0 # FIXME skip/rhel9.1 # FIXME skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME +skip/rhel9.4 # FIXME skip/docker needs/root needs/target/setup_epel From 60ba7cab9390fbe19be5b4a967dd11de0fbae8c8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 21 Jun 2024 06:56:21 +1200 Subject: [PATCH 139/482] add test case for cmd_runner_fmt.as_list() (#8541) --- tests/unit/plugins/module_utils/test_cmd_runner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index fcdffe7d2c..8cee57b01e 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ 
b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -32,6 +32,7 @@ TC_FORMATS = dict( simple_opt_val__int=(partial(cmd_runner_fmt.as_opt_val, "-t"), 42, ["-t", "42"], None), simple_opt_eq_val__str=(partial(cmd_runner_fmt.as_opt_eq_val, "--food"), "potatoes", ["--food=potatoes"], None), simple_opt_eq_val__int=(partial(cmd_runner_fmt.as_opt_eq_val, "--answer"), 42, ["--answer=42"], None), + simple_list_empty=(cmd_runner_fmt.as_list, [], [], None), simple_list_potato=(cmd_runner_fmt.as_list, "literal_potato", ["literal_potato"], None), simple_list_42=(cmd_runner_fmt.as_list, 42, ["42"], None), simple_list_min_len_ok=(partial(cmd_runner_fmt.as_list, min_len=1), 42, ["42"], None), From 0d50131d5ea8f1e82948b47daa4828432e5ddacf Mon Sep 17 00:00:00 2001 From: dlehrman Date: Thu, 20 Jun 2024 15:03:28 -0400 Subject: [PATCH 140/482] Enable Custom Cipher Selection for Redfish Modules (#8533) * Enable custom cipher selection for redfish modules Signed-off-by: David Ehrman * Add changelog fragment Signed-off-by: David Ehrman * Added version_added to the ciphers option in redfish modules Signed-off-by: David Ehrman --------- Signed-off-by: David Ehrman --- .../fragments/8533-add-ciphers-option.yml | 4 ++++ plugins/module_utils/redfish_utils.py | 13 ++++++------ plugins/modules/redfish_command.py | 18 +++++++++++++++- plugins/modules/redfish_config.py | 21 +++++++++++++++++-- plugins/modules/redfish_info.py | 17 ++++++++++++++- 5 files changed, 63 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/8533-add-ciphers-option.yml diff --git a/changelogs/fragments/8533-add-ciphers-option.yml b/changelogs/fragments/8533-add-ciphers-option.yml new file mode 100644 index 0000000000..7f9880ebee --- /dev/null +++ b/changelogs/fragments/8533-add-ciphers-option.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533). +... 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 139628bd9f..4240b9e4e7 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -42,7 +42,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): + data_modification=False, strip_etag_quotes=False, ciphers=None): self.root_uri = root_uri self.creds = creds self.timeout = timeout @@ -53,6 +53,7 @@ class RedfishUtils(object): self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes + self.ciphers = ciphers self._vendor = None self._init_session() @@ -149,7 +150,7 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=timeout) + use_proxy=True, timeout=timeout, ciphers=self.ciphers) headers = dict((k.lower(), v) for (k, v) in resp.info().items()) try: if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'): @@ -199,7 +200,7 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) try: data = json.loads(to_native(resp.read())) except Exception as e: @@ -253,7 +254,7 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'changed': False, @@ -288,7 +289,7 @@ class 
RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, @@ -314,7 +315,7 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index 0f7a64b81f..f9b0c8bd3b 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -302,6 +302,17 @@ options: type: int default: 120 version_added: 9.1.0 + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
+ type: list + elements: str + version_added: 9.2.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -868,6 +879,7 @@ def main(): bios_attributes=dict(type="dict"), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=120), + ciphers=dict(type='list', elements='str'), ), required_together=[ ('username', 'password'), @@ -936,10 +948,14 @@ def main(): # BIOS Attributes options bios_attributes = module.params['bios_attributes'] + # ciphers + ciphers = module.params['ciphers'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, + ciphers=ciphers) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 129b33b2e6..25f3cffdb4 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -167,6 +167,18 @@ options: type: dict default: {} version_added: '7.5.0' + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
+ type: list + elements: str + version_added: 9.2.0 + author: - "Jose Delarosa (@jose-delarosa)" - "T S Kushal (@TSKushal)" @@ -405,7 +417,8 @@ def main(): storage_subsystem_id=dict(type='str', default=''), volume_ids=dict(type='list', default=[], elements='str'), secure_boot_enable=dict(type='bool', default=True), - volume_details=dict(type='dict', default={}) + volume_details=dict(type='dict', default={}), + ciphers=dict(type='list', elements='str'), ), required_together=[ ('username', 'password'), @@ -469,10 +482,14 @@ def main(): volume_details = module.params['volume_details'] storage_subsystem_id = module.params['storage_subsystem_id'] + # ciphers + ciphers = module.params['ciphers'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, + ciphers=ciphers) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index efcb34f016..b1b4a45ee5 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -73,6 +73,17 @@ options: - Handle to check the status of an update in progress. type: str version_added: '6.1.0' + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
+ type: list + elements: str + version_added: 9.2.0 author: "Jose Delarosa (@jose-delarosa)" ''' @@ -423,6 +434,7 @@ def main(): timeout=dict(type='int', default=60), update_handle=dict(), manager=dict(), + ciphers=dict(type='list', elements='str'), ), required_together=[ ('username', 'password'), @@ -450,9 +462,12 @@ def main(): # manager manager = module.params['manager'] + # ciphers + ciphers = module.params['ciphers'] + # Build root URI root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module) + rf_utils = RedfishUtils(creds, root_uri, timeout, module, ciphers=ciphers) # Build Category list if "all" in module.params['category']: From 9e38161400be6d9f1e28f4674d8c99786a34acbc Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 21 Jun 2024 22:01:05 +0200 Subject: [PATCH 141/482] CI: Replace FreeBSD 14.0 with 14.1; add 14.0 for stable-2.17 (#8550) * Replace FreeBSD 14.0 with 14.1; add 14.0 for stable-2.17. * Skip tests that do not work. 
--- .azure-pipelines/azure-pipelines.yml | 6 ++++-- tests/integration/targets/filter_jc/aliases | 1 + tests/integration/targets/iso_extract/aliases | 1 + tests/integration/targets/pkgng/tasks/freebsd.yml | 5 ++++- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index aee822043f..7c6470b8da 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -194,8 +194,8 @@ stages: test: macos/14.3 - name: RHEL 9.4 test: rhel/9.4 - - name: FreeBSD 14.0 - test: freebsd/14.0 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 1 - 2 @@ -212,6 +212,8 @@ stages: test: freebsd/13.3 - name: RHEL 9.3 test: rhel/9.3 + - name: FreeBSD 14.0 + test: freebsd/14.0 groups: - 1 - 2 diff --git a/tests/integration/targets/filter_jc/aliases b/tests/integration/targets/filter_jc/aliases index 4e11515666..a39321e96d 100644 --- a/tests/integration/targets/filter_jc/aliases +++ b/tests/integration/targets/filter_jc/aliases @@ -6,3 +6,4 @@ azp/posix/2 skip/python2.7 # jc only supports python3.x skip/freebsd13.3 # FIXME - ruyaml compilation fails skip/freebsd14.0 # FIXME - ruyaml compilation fails +skip/freebsd14.1 # FIXME - ruyaml compilation fails diff --git a/tests/integration/targets/iso_extract/aliases b/tests/integration/targets/iso_extract/aliases index 68cc0db486..27e07941a5 100644 --- a/tests/integration/targets/iso_extract/aliases +++ b/tests/integration/targets/iso_extract/aliases @@ -16,3 +16,4 @@ skip/freebsd12.4 # FIXME skip/freebsd13.2 # FIXME skip/freebsd13.3 # FIXME skip/freebsd14.0 # FIXME +skip/freebsd14.1 # FIXME diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml index 9d4ecf8bb2..e69d26c20d 100644 --- a/tests/integration/targets/pkgng/tasks/freebsd.yml +++ b/tests/integration/targets/pkgng/tasks/freebsd.yml @@ -521,12 +521,15 @@ # NOTE: FreeBSD 14.0 fails to update the package catalogue 
for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 14.1 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # # See also # https://github.com/ansible-collections/community.general/issues/5795 when: >- (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<')) or (ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14.0', '<')) - or ansible_distribution_version is version('14.1', '>=') + or ansible_distribution_version is version('14.2', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml From 1053545870f0f71eae8cb05dddc995f6ee737f50 Mon Sep 17 00:00:00 2001 From: Noah Lehmann <62204532+noahlehmann@users.noreply.github.com> Date: Sat, 22 Jun 2024 10:49:09 +0200 Subject: [PATCH 142/482] keycloak_clientscope: ignore ids on diff check (#8545) * keycloak_clientscope: ignore ids on diff check * keycloak_clientscope: add changelog fragment * keycloak_clientscope: Include changelog fragment change suggestion Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8545-keycloak-clientscope-remove-id-on-compare.yml | 2 ++ plugins/modules/keycloak_clientscope.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml diff --git a/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml b/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml new file mode 100644 index 0000000000..5986a45b87 --- /dev/null +++ b/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). 
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index b962b932c9..b8ee842195 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -472,7 +472,9 @@ def main(): # Process an update # no changes - if desired_clientscope == before_clientscope: + # remove ids for compare, problematic if desired has no ids set (not required), + # normalize for consentRequired in protocolMappers + if normalise_cr(desired_clientscope, remove_ids=True) == normalise_cr(before_clientscope, remove_ids=True): result['changed'] = False result['end_state'] = sanitize_cr(desired_clientscope) result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) From 70c8042c997b33c2170d2c65020f27e1768386b6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 27 Jun 2024 21:56:19 +1200 Subject: [PATCH 143/482] proxmox_template: small refactor in get_template() (#8516) * proxmox_template: small refactor in get_template() * add changelog frag * Update plugins/modules/proxmox_template.py Co-authored-by: Felix Fontein * rename function as per PR suggestion --------- Co-authored-by: Felix Fontein --- .../fragments/8516-proxmox-template-refactor.yml | 2 ++ plugins/modules/proxmox_template.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8516-proxmox-template-refactor.yml diff --git a/changelogs/fragments/8516-proxmox-template-refactor.yml b/changelogs/fragments/8516-proxmox-template-refactor.yml new file mode 100644 index 0000000000..c069985111 --- /dev/null +++ b/changelogs/fragments/8516-proxmox-template-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516). 
diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py index f73109931f..134286164c 100644 --- a/plugins/modules/proxmox_template.py +++ b/plugins/modules/proxmox_template.py @@ -144,12 +144,12 @@ except ImportError: class ProxmoxTemplateAnsible(ProxmoxAnsible): - def get_template(self, node, storage, content_type, template): + def has_template(self, node, storage, content_type, template): + volid = '%s:%s/%s' % (storage, content_type, template) try: - return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get() - if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)] + return any(tmpl['volid'] == volid for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()) except Exception as e: - self.module.fail_json(msg="Failed to retrieve template '%s:%s/%s': %s" % (storage, content_type, template, e)) + self.module.fail_json(msg="Failed to retrieve template '%s': %s" % (volid, e)) def task_status(self, node, taskid, timeout): """ @@ -190,7 +190,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible): volid = '%s:%s/%s' % (storage, content_type, template) self.proxmox_api.nodes(node).storage(storage).content.delete(volid) while timeout: - if not self.get_template(node, storage, content_type, template): + if not self.has_template(node, storage, content_type, template): return True timeout = timeout - 1 if timeout == 0: @@ -239,14 +239,14 @@ def main(): if not template: module.fail_json(msg='template param for downloading appliance template is mandatory') - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: + if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) if proxmox.download_template(node, storage, template, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s 
downloaded' % (storage, content_type, template)) template = os.path.basename(src) - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: + if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) elif not src: module.fail_json(msg='src param to uploading template file is mandatory') @@ -261,7 +261,7 @@ def main(): content_type = module.params['content_type'] template = module.params['template'] - if not proxmox.get_template(node, storage, content_type, template): + if not proxmox.has_template(node, storage, content_type, template): module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) if proxmox.delete_template(node, storage, content_type, template, timeout): From 6e0142fe3aa7c0e8087a51e352c931dc7ddcf7f0 Mon Sep 17 00:00:00 2001 From: Kai Date: Thu, 27 Jun 2024 11:56:48 +0200 Subject: [PATCH 144/482] bitwarden: Fix KeyError in search_field (#8549) (#8557) * bitwarden: Fix KeyError in search_field (#8549) * Update changelogs/fragments/8557-fix-bug-with-bitwarden.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8557-fix-bug-with-bitwarden.yml | 2 ++ plugins/lookup/bitwarden.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8557-fix-bug-with-bitwarden.yml diff --git a/changelogs/fragments/8557-fix-bug-with-bitwarden.yml b/changelogs/fragments/8557-fix-bug-with-bitwarden.yml new file mode 100644 index 0000000000..cf41ae209f --- /dev/null +++ b/changelogs/fragments/8557-fix-bug-with-bitwarden.yml @@ -0,0 +1,2 @@ +bugfixes: + - "bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, 
https://github.com/ansible-collections/community.general/pull/8557)." \ No newline at end of file diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 7584cd98a6..5e31cc6f89 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -174,8 +174,9 @@ class Bitwarden(object): else: initial_matches = [initial_matches] - # Filter to only include results from the right field. - return [item for item in initial_matches if not search_value or item[search_field] == search_value] + # Filter to only include results from the right field, if a search is requested by value or field + return [item for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value] def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): """Return a list of the specified field for records whose search_field match search_value From 01d8c7b7691172bf0c01b549f0fd73e91a19c3ad Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 1 Jul 2024 13:43:56 +0200 Subject: [PATCH 145/482] Fix CI for CentOS 7 (#8567) Fix CI for CentOS 7. 
--- tests/integration/targets/setup_pkg_mgr/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/tests/integration/targets/setup_pkg_mgr/tasks/main.yml index 5bff53b3b1..91f406d861 100644 --- a/tests/integration/targets/setup_pkg_mgr/tasks/main.yml +++ b/tests/integration/targets/setup_pkg_mgr/tasks/main.yml @@ -26,6 +26,12 @@ cacheable: true when: ansible_os_family == "Archlinux" +- shell: + cmd: | + sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/*.repo + sed -i 's%#baseurl=http://mirror.centos.org/%baseurl=https://vault.centos.org/%g' /etc/yum.repos.d/*.repo + when: ansible_distribution in 'CentOS' and ansible_distribution_major_version == '7' + - shell: cmd: | sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*.repo From 5259caacae58204b7140a373a11dcb5ac5f2365e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 5 Jul 2024 18:36:17 +1200 Subject: [PATCH 146/482] cmd_runner - handle special value "auto" in param force_lang (#8517) * cmd_runner - handle special value "auto" in param force_lang * add changelog frag * update doc in puppet * fix markup --- changelogs/fragments/8517-cmd-runner-lang-auto.yml | 2 ++ plugins/module_utils/cmd_runner.py | 9 ++++++++- plugins/modules/puppet.py | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8517-cmd-runner-lang-auto.yml diff --git a/changelogs/fragments/8517-cmd-runner-lang-auto.yml b/changelogs/fragments/8517-cmd-runner-lang-auto.yml new file mode 100644 index 0000000000..086a74e997 --- /dev/null +++ b/changelogs/fragments/8517-cmd-runner-lang-auto.yml @@ -0,0 +1,2 @@ +minor_changes: + - CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto`` which will automatically try and determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index da4f1b6fc5..95167a282d 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -11,6 +11,7 @@ from functools import wraps from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.six import iteritems +from ansible.module_utils.common.locale import get_best_parsable_locale def _ensure_list(value): @@ -236,7 +237,13 @@ class CmdRunner(object): fmt = _Format.as_func(func=fmt, ignore_none=True) self.arg_formats[fmt_name] = fmt self.check_rc = check_rc - self.force_lang = force_lang + if force_lang == "auto": + try: + self.force_lang = get_best_parsable_locale() + except RuntimeWarning: + self.force_lang = "C" + else: + self.force_lang = force_lang self.path_prefix = path_prefix if environ_update is None: environ_update = {} diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index 073a083247..46326c667f 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -128,6 +128,8 @@ options: - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the selected locale is supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module will + try and determine the best parseable locale to use. type: str default: C version_added: 8.6.0 From caecb2297fbf8cfb9ae80926ead10b4fb11a003f Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Fri, 5 Jul 2024 08:42:35 +0200 Subject: [PATCH 147/482] Feature. Add chapter 'Lists of dictionaries' to docsite (#8482) * Feature. Add chapter 'Lists of dictionaries' * Fix copyright and licensing. * Add maintainers for docsite chapter 'Lists of dictionaries'. 
* Generate docs keep_keys and remove_keys * Update integration tests of keep_keys and remove_keys * Update docs helpers of keep_keys and remove_keys * Fix copyright and licensing. * Fix remove license from templates. Cleanup. * Add docs helper replace_keys * Update integration test filter_replace_keys * Generate and update: filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst * Formatting improved. * Fix results Jinja quotation marks. * Update docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 Co-authored-by: Felix Fontein * Update docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 Co-authored-by: Felix Fontein * Fix references. * Updated helpers. * Fix licenses. Simplified templates. * Fix licenses. * Fix README. --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 8 + docs/docsite/helper/keep_keys/README.md | 61 ++++++ ...ons-lists_of_dictionaries-keep_keys.rst.j2 | 80 ++++++++ .../helper/keep_keys/keep_keys.rst.sha1 | 1 + .../keep_keys/keep_keys.rst.sha1.license | 3 + docs/docsite/helper/keep_keys/playbook.yml | 79 ++++++++ docs/docsite/helper/keep_keys/tests.yml.sha1 | 1 + .../helper/keep_keys/tests.yml.sha1.license | 3 + docs/docsite/helper/remove_keys/README.md | 61 ++++++ ...s-lists_of_dictionaries-remove_keys.rst.j2 | 80 ++++++++ docs/docsite/helper/remove_keys/playbook.yml | 79 ++++++++ .../helper/remove_keys/remove_keys.rst.sha1 | 1 + .../remove_keys/remove_keys.rst.sha1.license | 3 + .../docsite/helper/remove_keys/tests.yml.sha1 | 1 + .../helper/remove_keys/tests.yml.sha1.license | 3 + docs/docsite/helper/replace_keys/README.md | 61 ++++++ ...-lists_of_dictionaries-replace_keys.rst.j2 | 110 +++++++++++ docs/docsite/helper/replace_keys/playbook.yml | 79 ++++++++ .../helper/replace_keys/replace_keys.rst.sha1 | 1 + .../replace_keys.rst.sha1.license | 3 + .../helper/replace_keys/tests.yml.sha1 | 1 + 
.../replace_keys/tests.yml.sha1.license | 3 + ...ations-lists_of_dictionaries-keep_keys.rst | 151 +++++++++++++++ ...ions-lists_of_dictionaries-remove_keys.rst | 159 ++++++++++++++++ ...ons-lists_of_dictionaries-replace_keys.rst | 175 ++++++++++++++++++ ...act_informations-lists_of_dictionaries.rst | 18 ++ .../filter_guide_abstract_informations.rst | 1 + .../filter_keep_keys/tasks/keep_keys.yml | 79 -------- .../targets/filter_keep_keys/tasks/main.yml | 4 +- .../targets/filter_keep_keys/tasks/tests.yml | 31 ++++ .../filter_keep_keys/templates/default.j2 | 1 + .../templates/default.j2.license | 3 + .../targets/filter_keep_keys/templates/mp.j2 | 1 + .../filter_keep_keys/templates/mp.j2.license | 3 + .../targets/filter_keep_keys/vars/main.yml | 33 ---- .../filter_keep_keys/vars/main/tests.yml | 40 ++++ .../targets/filter_remove_keys/tasks/main.yml | 4 +- .../filter_remove_keys/tasks/remove_keys.yml | 79 -------- .../filter_remove_keys/tasks/tests.yml | 31 ++++ .../filter_remove_keys/templates/default.j2 | 1 + .../templates/default.j2.license | 3 + .../filter_remove_keys/templates/mp.j2 | 1 + .../templates/mp.j2.license | 3 + .../targets/filter_remove_keys/vars/main.yml | 33 ---- .../filter_remove_keys/vars/main/tests.yml | 40 ++++ .../tasks/fn-test-replace_keys.yml | 21 --- .../filter_replace_keys/tasks/main.yml | 4 +- .../tasks/replace_keys.yml | 56 ------ .../filter_replace_keys/tasks/tests.yml | 31 ++++ .../filter_replace_keys/templates/default.j2 | 1 + .../templates/default.j2.license | 3 + .../filter_replace_keys/templates/mp.j2 | 1 + .../templates/mp.j2.license | 3 + .../targets/filter_replace_keys/vars/main.yml | 58 ------ .../filter_replace_keys/vars/main/tests.yml | 71 +++++++ 55 files changed, 1500 insertions(+), 365 deletions(-) create mode 100644 docs/docsite/helper/keep_keys/README.md create mode 100644 docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 create mode 100644 
docs/docsite/helper/keep_keys/keep_keys.rst.sha1 create mode 100644 docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license create mode 100644 docs/docsite/helper/keep_keys/playbook.yml create mode 100644 docs/docsite/helper/keep_keys/tests.yml.sha1 create mode 100644 docs/docsite/helper/keep_keys/tests.yml.sha1.license create mode 100644 docs/docsite/helper/remove_keys/README.md create mode 100644 docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 create mode 100644 docs/docsite/helper/remove_keys/playbook.yml create mode 100644 docs/docsite/helper/remove_keys/remove_keys.rst.sha1 create mode 100644 docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license create mode 100644 docs/docsite/helper/remove_keys/tests.yml.sha1 create mode 100644 docs/docsite/helper/remove_keys/tests.yml.sha1.license create mode 100644 docs/docsite/helper/replace_keys/README.md create mode 100644 docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 create mode 100644 docs/docsite/helper/replace_keys/playbook.yml create mode 100644 docs/docsite/helper/replace_keys/replace_keys.rst.sha1 create mode 100644 docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license create mode 100644 docs/docsite/helper/replace_keys/tests.yml.sha1 create mode 100644 docs/docsite/helper/replace_keys/tests.yml.sha1.license create mode 100644 docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst create mode 100644 docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst create mode 100644 docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst create mode 100644 docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst delete mode 100644 tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml create mode 100644 
tests/integration/targets/filter_keep_keys/tasks/tests.yml create mode 100644 tests/integration/targets/filter_keep_keys/templates/default.j2 create mode 100644 tests/integration/targets/filter_keep_keys/templates/default.j2.license create mode 100644 tests/integration/targets/filter_keep_keys/templates/mp.j2 create mode 100644 tests/integration/targets/filter_keep_keys/templates/mp.j2.license delete mode 100644 tests/integration/targets/filter_keep_keys/vars/main.yml create mode 100644 tests/integration/targets/filter_keep_keys/vars/main/tests.yml delete mode 100644 tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml create mode 100644 tests/integration/targets/filter_remove_keys/tasks/tests.yml create mode 100644 tests/integration/targets/filter_remove_keys/templates/default.j2 create mode 100644 tests/integration/targets/filter_remove_keys/templates/default.j2.license create mode 100644 tests/integration/targets/filter_remove_keys/templates/mp.j2 create mode 100644 tests/integration/targets/filter_remove_keys/templates/mp.j2.license delete mode 100644 tests/integration/targets/filter_remove_keys/vars/main.yml create mode 100644 tests/integration/targets/filter_remove_keys/vars/main/tests.yml delete mode 100644 tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml delete mode 100644 tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml create mode 100644 tests/integration/targets/filter_replace_keys/tasks/tests.yml create mode 100644 tests/integration/targets/filter_replace_keys/templates/default.j2 create mode 100644 tests/integration/targets/filter_replace_keys/templates/default.j2.license create mode 100644 tests/integration/targets/filter_replace_keys/templates/mp.j2 create mode 100644 tests/integration/targets/filter_replace_keys/templates/mp.j2.license delete mode 100644 tests/integration/targets/filter_replace_keys/vars/main.yml create mode 100644 
tests/integration/targets/filter_replace_keys/vars/main/tests.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index faedb42605..17659231fb 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1444,6 +1444,14 @@ files: maintainers: felixfontein docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst: maintainers: cfiehe + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst: + maintainers: vbotka docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst: maintainers: vbotka docs/docsite/rst/filter_guide_conversions.rst: diff --git a/docs/docsite/helper/keep_keys/README.md b/docs/docsite/helper/keep_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/keep_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. + +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. 
+ +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 new file mode 100644 index 0000000000..77281549ba --- /dev/null +++ b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 @@ -0,0 +1,80 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. 
Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} diff --git a/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 new file mode 100644 index 0000000000..532c6a192c --- /dev/null +++ b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 @@ -0,0 +1 @@ +8690afce792abc95693c2f61f743ee27388b1592 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst diff --git a/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/keep_keys/playbook.yml b/docs/docsite/helper/keep_keys/playbook.yml new file mode 100644 index 0000000000..75ef90385b --- /dev/null +++ b/docs/docsite/helper/keep_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. Do not add *.rst in this directory to the version +# control. 
+# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/keep_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: keep_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/keep_keys/tests.yml.sha1 b/docs/docsite/helper/keep_keys/tests.yml.sha1 new file mode 100644 index 0000000000..fcf41a4347 --- /dev/null +++ b/docs/docsite/helper/keep_keys/tests.yml.sha1 @@ -0,0 +1 @@ +c6fc4ee2017d9222675bcd13cc4f88ba8d14f38d 
../../../../tests/integration/targets/filter_keep_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/keep_keys/tests.yml.sha1.license b/docs/docsite/helper/keep_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/keep_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/remove_keys/README.md b/docs/docsite/helper/remove_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/remove_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. + +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. + +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. 
Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 new file mode 100644 index 0000000000..62b25c344c --- /dev/null +++ b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 @@ -0,0 +1,80 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. 
code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} diff --git a/docs/docsite/helper/remove_keys/playbook.yml b/docs/docsite/helper/remove_keys/playbook.yml new file mode 100644 index 0000000000..a2243d992e --- /dev/null +++ b/docs/docsite/helper/remove_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. Do not add *.rst in this directory to the version +# control. 
+# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/remove_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: remove_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 new file mode 100644 index 0000000000..a1c9e18210 --- /dev/null +++ b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 @@ -0,0 +1 @@ 
+3cc606b42e3d450cf6323f25930f7c5a591fa086 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst diff --git a/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/remove_keys/tests.yml.sha1 b/docs/docsite/helper/remove_keys/tests.yml.sha1 new file mode 100644 index 0000000000..107a64d73c --- /dev/null +++ b/docs/docsite/helper/remove_keys/tests.yml.sha1 @@ -0,0 +1 @@ +0554335045f02d8c37b824355b0cf86864cee9a5 ../../../../tests/integration/targets/filter_remove_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/remove_keys/tests.yml.sha1.license b/docs/docsite/helper/remove_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/remove_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/replace_keys/README.md b/docs/docsite/helper/replace_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/replace_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. 
+ +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. + +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 new file mode 100644 index 0000000000..fb0af32f2f --- /dev/null +++ b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 @@ -0,0 +1,110 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 3 }}. {{ i.1.d }} + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +{% for i in tests[3:4]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + {{ i.0.input | to_yaml(indent=2) | indent(5) }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ i.0.result | to_yaml(indent=2) | indent(5) }} + +{% endfor %} diff --git a/docs/docsite/helper/replace_keys/playbook.yml b/docs/docsite/helper/replace_keys/playbook.yml new file mode 100644 index 0000000000..3619000144 --- /dev/null +++ b/docs/docsite/helper/replace_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. Do not add *.rst in this directory to the version +# control. 
+# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/replace_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: replace_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 new file mode 100644 index 0000000000..2ae692f3cc --- /dev/null +++ b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 @@ -0,0 +1 @@ 
+403f23c02ac02b1c3b611cb14f9b3ba59dc3f587 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst diff --git a/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/replace_keys/tests.yml.sha1 b/docs/docsite/helper/replace_keys/tests.yml.sha1 new file mode 100644 index 0000000000..53944ddf74 --- /dev/null +++ b/docs/docsite/helper/replace_keys/tests.yml.sha1 @@ -0,0 +1 @@ +2e54f3528c95cca746d5748f1ed7ada56ad0890e ../../../../tests/integration/targets/filter_replace_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/replace_keys/tests.yml.sha1.license b/docs/docsite/helper/replace_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/replace_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst new file mode 100644 index 0000000000..488cb2ce7d --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -0,0 +1,151 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0} + - {k0_x0: A1} + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst new file mode 100644 index 0000000000..03d4710f3a --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst @@ -0,0 +1,159 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +1. 
Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst new file mode 100644 index 0000000000..ba1bcad502 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + - {after: a0, before: k0_x0} + - {after: a1, before: k1_x1} + + result: "{{ input | community.general.replace_keys(target=target) }}" + + +gives + +.. 
code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +1. Replace keys that starts with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: a0, before: k0} + - {after: a1, before: k1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +2. Replace keys that ends with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: ends_with + target: + - {after: a0, before: x0} + - {after: a1, before: x1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +3. Replace keys that match any regex of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: regex + target: + - {after: a0, before: ^.*0_x.*$} + - {after: a1, before: ^.*1_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: foo} + - {X: bar} + + +4. If more keys match the same attribute before the last one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +5. If there are items with equal attribute before the first one will be used. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + - {after: Y, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +6. If there are more matches for a key the first one will be used. + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: X, before: a} + - {after: Y, before: aa} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} + + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst new file mode 100644 index 0000000000..42737c44b7 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -0,0 +1,18 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: + +Lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^ + +Filters to manage keys in a list of dictionaries: + +.. 
toctree:: + :maxdepth: 1 + + filter_guide-abstract_informations-lists_of_dictionaries-keep_keys + filter_guide-abstract_informations-lists_of_dictionaries-remove_keys + filter_guide-abstract_informations-lists_of_dictionaries-replace_keys diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst index cac85089a0..818c09f02c 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations.rst @@ -11,6 +11,7 @@ Abstract transformations filter_guide_abstract_informations_dictionaries filter_guide_abstract_informations_grouping + filter_guide-abstract_informations-lists_of_dictionaries filter_guide_abstract_informations_merging_lists_of_dictionaries filter_guide_abstract_informations_lists_helper filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml b/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml deleted file mode 100644 index 94825c9d61..0000000000 --- a/tests/integration/targets/filter_keep_keys/tasks/keep_keys.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Debug ansible_version - ansible.builtin.debug: - var: ansible_version - when: not quite_test | d(true) | bool - tags: ansible_version - -- name: Test keep keys equal (default) - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ list1 | community.general.keep_keys(target=tt) }}" - tt: [k0_x0, k1_x1] - tags: equal_default - -- name: Test keep keys regex string - ansible.builtin.assert: - that: - - (rr 
| difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ list1 | community.general.keep_keys(target=tt, matching_parameter=mp) }}" - mp: regex - tt: '^.*[01]_x.*$' - tags: regex_string - -- name: Test keep keys targets1 - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - loop: "{{ targets1 }}" - loop_control: - label: "{{ item.mp }}: {{ item.tt }}" - vars: - rr: "{{ list1 | community.general.keep_keys(target=item.tt, matching_parameter=item.mp) }}" - tags: targets1 - -- name: Test keep keys targets2 - ansible.builtin.assert: - that: - - (rr | difference(result2) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - loop: "{{ targets2 }}" - loop_control: - label: "{{ item.mp }}: {{ item.tt }}" - vars: - rr: "{{ list2 | community.general.keep_keys(target=item.tt, matching_parameter=item.mp) }}" - tags: targets2 diff --git a/tests/integration/targets/filter_keep_keys/tasks/main.yml b/tests/integration/targets/filter_keep_keys/tasks/main.yml index 23457d1e11..9c0674780e 100644 --- a/tests/integration/targets/filter_keep_keys/tasks/main.yml +++ b/tests/integration/targets/filter_keep_keys/tasks/main.yml @@ -3,5 +3,5 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Test keep_keys - import_tasks: keep_keys.yml +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_keep_keys/tasks/tests.yml b/tests/integration/targets/filter_keep_keys/tasks/tests.yml new file mode 100644 index 
0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/tasks/tests.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_keep_keys/templates/default.j2 b/tests/integration/targets/filter_keep_keys/templates/default.j2 new file mode 100644 index 0000000000..cb1232f9ee --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.keep_keys(target=target) }} diff --git a/tests/integration/targets/filter_keep_keys/templates/default.j2.license b/tests/integration/targets/filter_keep_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_keep_keys/templates/mp.j2 b/tests/integration/targets/filter_keep_keys/templates/mp.j2 new file mode 
100644 index 0000000000..753698d420 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_keep_keys/templates/mp.j2.license b/tests/integration/targets/filter_keep_keys/templates/mp.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_keep_keys/vars/main.yml b/tests/integration/targets/filter_keep_keys/vars/main.yml deleted file mode 100644 index b25325253d..0000000000 --- a/tests/integration/targets/filter_keep_keys/vars/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -targets1: - - {mp: equal, tt: [k0_x0, k1_x1]} - - {mp: starts_with, tt: [k0, k1]} - - {mp: ends_with, tt: [x0, x1]} - - {mp: regex, tt: ['^.*[01]_x.*$']} - - {mp: regex, tt: '^.*[01]_x.*$'} - -list1: - - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} - -result1: - - {k0_x0: A0, k1_x1: B0} - - {k0_x0: A1, k1_x1: B1} - -targets2: - - {mp: equal, tt: k0_x0} - - {mp: starts_with, tt: k0} - - {mp: ends_with, tt: x0} - - {mp: regex, tt: '^.*0_x.*$'} - -list2: - - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} - -result2: - - {k0_x0: A0} - - {k0_x0: A1} diff --git a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml new file mode 100644 
index 0000000000..f1abceddda --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - {tt: [k0_x0, k1_x1], d: 'By default, match keys that equal any of the items in the target.'} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + - template: mp.j2 + group: + - {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.} + - {mp: starts_with, tt: [k0, k1], d: Match keys that start with any of the items in the target.} + - {mp: ends_with, tt: [x0, x1], d: Match keys that end with any of the items in target.} + - {mp: regex, tt: ['^.*[01]_x.*$'], d: Match keys by the regex.} + - {mp: regex, tt: '^.*[01]_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + - template: mp.j2 + group: + - {mp: equal, tt: k0_x0, d: Match keys that equal the target.} + - {mp: starts_with, tt: k0, d: Match keys that start with the target.} + - {mp: ends_with, tt: x0, d: Match keys that end with the target.} + - {mp: regex, tt: '^.*0_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0} + - {k0_x0: A1} diff --git a/tests/integration/targets/filter_remove_keys/tasks/main.yml b/tests/integration/targets/filter_remove_keys/tasks/main.yml index d4215d8c59..9c0674780e 100644 --- a/tests/integration/targets/filter_remove_keys/tasks/main.yml +++ 
b/tests/integration/targets/filter_remove_keys/tasks/main.yml @@ -3,5 +3,5 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Test remove_keys - import_tasks: remove_keys.yml +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml b/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml deleted file mode 100644 index 121cd88cfd..0000000000 --- a/tests/integration/targets/filter_remove_keys/tasks/remove_keys.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Debug ansible_version - ansible.builtin.debug: - var: ansible_version - when: not quite_test | d(true) | bool - tags: ansible_version - -- name: Test remove keys equal (default) - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ list1 | community.general.remove_keys(target=tt) }}" - tt: [k0_x0, k1_x1] - tags: equal_default - -- name: Test remove keys regex string - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ list1 | community.general.remove_keys(target=tt, matching_parameter=mp) }}" - mp: regex - tt: '^.*[01]_x.*$' - tags: regex_string - -- name: Test remove keys targets1 - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | 
to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - loop: "{{ targets1 }}" - loop_control: - label: "{{ item.mp }}: {{ item.tt }}" - vars: - rr: "{{ list1 | community.general.remove_keys(target=item.tt, matching_parameter=item.mp) }}" - tags: targets1 - -- name: Test remove keys targets2 - ansible.builtin.assert: - that: - - (rr | difference(result2) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - loop: "{{ targets2 }}" - loop_control: - label: "{{ item.mp }}: {{ item.tt }}" - vars: - rr: "{{ list2 | community.general.remove_keys(target=item.tt, matching_parameter=item.mp) }}" - tags: targets1 diff --git a/tests/integration/targets/filter_remove_keys/tasks/tests.yml b/tests/integration/targets/filter_remove_keys/tasks/tests.yml new file mode 100644 index 0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/tasks/tests.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_remove_keys/templates/default.j2 
b/tests/integration/targets/filter_remove_keys/templates/default.j2 new file mode 100644 index 0000000000..0dbc26323f --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.remove_keys(target=target) }} diff --git a/tests/integration/targets/filter_remove_keys/templates/default.j2.license b/tests/integration/targets/filter_remove_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_remove_keys/templates/mp.j2 b/tests/integration/targets/filter_remove_keys/templates/mp.j2 new file mode 100644 index 0000000000..5caa27a9b8 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_remove_keys/templates/mp.j2.license b/tests/integration/targets/filter_remove_keys/templates/mp.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_remove_keys/vars/main.yml b/tests/integration/targets/filter_remove_keys/vars/main.yml deleted file mode 100644 index a52d09a34a..0000000000 --- a/tests/integration/targets/filter_remove_keys/vars/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -targets1: - - {mp: equal, tt: [k0_x0, k1_x1]} - - {mp: starts_with, tt: [k0, k1]} - - {mp: ends_with, tt: [x0, x1]} - - {mp: regex, tt: ['^.*[01]_x.*$']} - - {mp: regex, tt: '^.*[01]_x.*$'} - -list1: - - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} - -result1: - - {k2_x2: [C0], k3_x3: foo} - - {k2_x2: [C1], k3_x3: bar} - -targets2: - - {mp: equal, tt: k0_x0} - - {mp: starts_with, tt: k0} - - {mp: ends_with, tt: x0} - - {mp: regex, tt: '^.*0_x.*$'} - -list2: - - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} - -result2: - - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} diff --git a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml new file mode 100644 index 0000000000..a4767ea799 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - {tt: [k0_x0, k1_x1], d: 'By default, match keys that equal any of the items in the target.'} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.} + - {mp: starts_with, tt: [k0, k1], d: Match keys that start with any of the items in the target.} + - {mp: ends_with, tt: [x0, x1], d: Match keys that end with any of the items in target.} + - {mp: 
regex, tt: ['^.*[01]_x.*$'], d: Match keys by the regex.} + - {mp: regex, tt: '^.*[01]_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - {mp: equal, tt: k0_x0, d: Match keys that equal the target.} + - {mp: starts_with, tt: k0, d: Match keys that start with the target.} + - {mp: ends_with, tt: x0, d: Match keys that end with the target.} + - {mp: regex, tt: '^.*0_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} diff --git a/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml b/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml deleted file mode 100644 index e324376a5a..0000000000 --- a/tests/integration/targets/filter_replace_keys/tasks/fn-test-replace_keys.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Test replace keys - ansible.builtin.assert: - that: - - (rr | difference(item.result) | length) == 0 - success_msg: | - [OK] {{ item.label }} - result: - {{ rr | to_nice_yaml(indent=2) | indent(2) }} - fail_msg: | - [ERR] {{ item.label }} - result: - {{ rr | to_nice_yaml(indent=2) | indent(2) }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ item.data | - community.general.replace_keys(target=item.target, matching_parameter=item.match) }}" diff --git a/tests/integration/targets/filter_replace_keys/tasks/main.yml b/tests/integration/targets/filter_replace_keys/tasks/main.yml index 35addaf946..9c0674780e 100644 --- 
a/tests/integration/targets/filter_replace_keys/tasks/main.yml +++ b/tests/integration/targets/filter_replace_keys/tasks/main.yml @@ -3,5 +3,5 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Test replace_keys - import_tasks: replace_keys.yml +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml b/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml deleted file mode 100644 index a57921b81b..0000000000 --- a/tests/integration/targets/filter_replace_keys/tasks/replace_keys.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Debug ansible_version - ansible.builtin.debug: - var: ansible_version - when: not quiet_test | d(true) | bool - tags: ansible_version - -- name: Test replace keys equal (default) - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - vars: - rr: "{{ list1 | community.general.replace_keys(target=tt) }}" - tt: - - {before: k0_x0, after: a0} - - {before: k1_x1, after: a1} - tags: equal_default - -- name: Test replace keys targets1 - ansible.builtin.assert: - that: - - (rr | difference(result1) | length) == 0 - success_msg: | - [OK] result: - {{ rr | to_yaml }} - fail_msg: | - [ERR] result: - {{ rr | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" - loop: "{{ targets1 | dict2items }}" - loop_control: - label: "{{ item.key }}" - vars: - rr: "{{ list1 | community.general.replace_keys(target=item.value, matching_parameter=item.key) }}" - tags: targets1 - -- name: Test replace keys 
targets2 - include_tasks: - file: fn-test-replace_keys.yml - apply: - tags: targets2 - loop: "{{ targets2 }}" - loop_control: - label: "{{ item.label }}" - tags: targets2 diff --git a/tests/integration/targets/filter_replace_keys/tasks/tests.yml b/tests/integration/targets/filter_replace_keys/tasks/tests.yml new file mode 100644 index 0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/tests.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quiet_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_replace_keys/templates/default.j2 b/tests/integration/targets/filter_replace_keys/templates/default.j2 new file mode 100644 index 0000000000..6ba66cd690 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.replace_keys(target=target) }} diff --git a/tests/integration/targets/filter_replace_keys/templates/default.j2.license b/tests/integration/targets/filter_replace_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ 
b/tests/integration/targets/filter_replace_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_replace_keys/templates/mp.j2 b/tests/integration/targets/filter_replace_keys/templates/mp.j2 new file mode 100644 index 0000000000..70c5009d91 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_replace_keys/templates/mp.j2.license b/tests/integration/targets/filter_replace_keys/templates/mp.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_replace_keys/vars/main.yml b/tests/integration/targets/filter_replace_keys/vars/main.yml deleted file mode 100644 index 167e083960..0000000000 --- a/tests/integration/targets/filter_replace_keys/vars/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list1: - - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} - -result1: - - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} - - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} - -targets1: - equal: - - {before: k0_x0, after: a0} - - {before: k1_x1, after: a1} - starts_with: - - {before: k0, after: a0} - - {before: k1, 
after: a1} - ends_with: - - {before: x0, after: a0} - - {before: x1, after: a1} - regex: - - {before: "^.*0_x.*$", after: a0} - - {before: "^.*1_x.*$", after: a1} - -list2: - - {aaa1: A, bbb1: B, ccc1: C} - - {aaa2: D, bbb2: E, ccc2: F} - -targets2: - - label: If more keys match the same attribute before the last one will be used. - match: regex - target: - - {before: "^.*_x.*$", after: X} - data: "{{ list1 }}" - result: - - X: foo - - X: bar - - label: If there are items with equal attribute before the first one will be used. - match: regex - target: - - {before: "^.*_x.*$", after: X} - - {before: "^.*_x.*$", after: Y} - data: "{{ list1 }}" - result: - - X: foo - - X: bar - - label: If there are more matches for a key the first one will be used. - match: starts_with - target: - - {before: a, after: X} - - {before: aa, after: Y} - data: "{{ list2 }}" - result: - - {X: A, bbb1: B, ccc1: C} - - {X: D, bbb2: E, ccc2: F} diff --git a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml new file mode 100644 index 0000000000..ca906a770b --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml @@ -0,0 +1,71 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - d: By default, match keys that equal any of the attributes before. + tt: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - d: Replace keys that starts with any of the attributes before. 
+ mp: starts_with + tt: + - {before: k0, after: a0} + - {before: k1, after: a1} + - d: Replace keys that ends with any of the attributes before. + mp: ends_with + tt: + - {before: x0, after: a0} + - {before: x1, after: a1} + - d: Replace keys that match any regex of the attributes before. + mp: regex + tt: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - d: If more keys match the same attribute before the last one will be used. + mp: regex + tt: + - {before: "^.*_x.*$", after: X} + - d: If there are items with equal attribute before the first one will be used. + mp: regex + tt: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - X: foo + - X: bar + - template: mp.j2 + group: + - d: If there are more matches for a key the first one will be used. + mp: starts_with + tt: + - {before: a, after: X} + - {before: aa, after: Y} + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} From b2c773996d0f6e9f09fe1043058eb6db56abddca Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 5 Jul 2024 22:30:34 +0200 Subject: [PATCH 148/482] Add link to forum (#8585) Add link to forum. 
--- docs/docsite/links.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index bd954c4096..65c8992bba 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -25,3 +25,7 @@ communication: mailing_lists: - topic: Ansible Project List url: https://groups.google.com/g/ansible-project + forums: + - topic: Ansible Forum + # The following URL directly points to the "Get Help" section + url: https://forum.ansible.com/c/help/6/none From a3989095aff0f8ae117b29e9ebac3c65ea765772 Mon Sep 17 00:00:00 2001 From: Elias Probst Date: Sat, 6 Jul 2024 21:16:42 +0200 Subject: [PATCH 149/482] merge_variables: correct misleading short description (#8580) The short description makes it sound like the plugin would only support matching a given suffix, while the actual description clarifies the actual matching capabilities (suffix, prefix or regular expression). Update the short description accordingly. --- plugins/lookup/merge_variables.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index ce7621ad23..6287914747 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -12,7 +12,7 @@ DOCUMENTATION = """ - Mark Ettema (@m-a-r-k-e) - Alexander Petrenz (@alpex8) name: merge_variables - short_description: merge variables with a certain suffix + short_description: merge variables whose names match a given pattern description: - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions, optionally. From 45972c23d44380f3f7eb37d19e5a3bfdb37a64f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 09:11:18 +0200 Subject: [PATCH 150/482] Bump fsfe/reuse-action from 3 to 4 (#8596) Bumps [fsfe/reuse-action](https://github.com/fsfe/reuse-action) from 3 to 4. 
- [Release notes](https://github.com/fsfe/reuse-action/releases) - [Commits](https://github.com/fsfe/reuse-action/compare/v3...v4) --- updated-dependencies: - dependency-name: fsfe/reuse-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/reuse.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml index 031e94cb7a..31afe207c5 100644 --- a/.github/workflows/reuse.yml +++ b/.github/workflows/reuse.yml @@ -27,4 +27,4 @@ jobs: ref: ${{ github.event.pull_request.head.sha || '' }} - name: REUSE Compliance Check - uses: fsfe/reuse-action@v3 + uses: fsfe/reuse-action@v4 From feb1ecbfcddcc417bbae672674e080d3f6d1894d Mon Sep 17 00:00:00 2001 From: cmadarsh <53748644+cmadarsh@users.noreply.github.com> Date: Mon, 8 Jul 2024 13:15:09 +0530 Subject: [PATCH 151/482] Fix to handle Redfish Gen2 Firmware upgrade (#8444) * Fix to handle Redfish Gen2 Firmware upgrade * Fixed sanity checks and unit test cases * Added change log gragment * Updated change log fragment * Updated review comments --------- Co-authored-by: Adarsh Manjunath --- .../8444-fix-redfish-gen2-upgrade.yaml | 2 + plugins/module_utils/wdc_redfish_utils.py | 70 ++++++++++++++++--- .../modules/test_wdc_redfish_command.py | 2 +- 3 files changed, 62 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml diff --git a/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml b/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml new file mode 100644 index 0000000000..d094327240 --- /dev/null +++ b/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml @@ -0,0 +1,2 @@ +minor_changes: + - wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms 
(https://github.com/ansible-collections/community.general/pull/8444). diff --git a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py index bc4b0c2cd0..8c6fd71bf8 100644 --- a/plugins/module_utils/wdc_redfish_utils.py +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -11,6 +11,7 @@ import datetime import re import time import tarfile +import os from ansible.module_utils.urls import fetch_file from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils @@ -79,19 +80,25 @@ class WdcRedfishUtils(RedfishUtils): return response return self._find_updateservice_additional_uris() - def _is_enclosure_multi_tenant(self): + def _is_enclosure_multi_tenant_and_fetch_gen(self): """Determine if the enclosure is multi-tenant. The serial number of a multi-tenant enclosure will end in "-A" or "-B". + Fetching enclosure generation. - :return: True/False if the enclosure is multi-tenant or not; None if unable to determine. + :return: True/False if the enclosure is multi-tenant or not and the enclosure generation; + None if unable to determine. """ response = self.get_request(self.root_uri + self.service_root + "Chassis/Enclosure") if response['ret'] is False: return None pattern = r".*-[A,B]" data = response['data'] if 'EnclVersion' not in data: + enc_version = 'G1' + else: + enc_version = data['EnclVersion'] + return re.match(pattern, data['SerialNumber']) is not None, enc_version def _find_updateservice_additional_uris(self): """Find & set WDC-specific update service URIs""" @@ -180,15 +187,44 @@ class WdcRedfishUtils(RedfishUtils): To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile, and checks the appropriate byte in the file. + If not a tarfile, the 2048th byte of the bundle is checked to determine whether it is a Gen2 bundle. + Gen2 is always single tenant at this time. 
:param str bundle_uri: HTTP URI of the firmware bundle. - :return: Firmware version number contained in the bundle, and whether or not the bundle is multi-tenant. - Either value will be None if unable to determine. + :return: Firmware version number contained in the bundle, whether or not the bundle is multi-tenant + and bundle generation. Either value will be None if unable to determine. :rtype: str or None, bool or None """ bundle_temp_filename = fetch_file(module=self.module, url=bundle_uri) + bundle_version = None + is_multi_tenant = None + gen = None + + # If not tarfile, then if the file has "MMG2" or "DPG2" at 2048th byte + # then the bundle is for MM or DP G2 if not tarfile.is_tarfile(bundle_temp_filename): - return None, None + cookie1 = None + with open(bundle_temp_filename, "rb") as bundle_file: + file_size = os.path.getsize(bundle_temp_filename) + if file_size >= 2052: + bundle_file.seek(2048) + cookie1 = bundle_file.read(4) + # It is anticipated that the DP firmware bundle will have the value "DPG2" + # for cookie1 in the header + if cookie1 and (cookie1.decode("utf8") == "MMG2" or cookie1.decode("utf8") == "DPG2"): + file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1])) + # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12 + parsedFileName = file_name.split('_') + if len(parsedFileName) == 5: + bundle_version = parsedFileName[4] + # MM G2 is always single tenant + is_multi_tenant = False + gen = "G2" + + return bundle_version, is_multi_tenant, gen + + # Bundle is for MM or DP G1 tf = tarfile.open(bundle_temp_filename) pattern_pkg = r"oobm-(.+)\.pkg" pattern_bin = r"(.*\.bin)" @@ -205,8 +241,9 @@ class WdcRedfishUtils(RedfishUtils): bin_file.seek(11) byte_11 = bin_file.read(1) is_multi_tenant = byte_11 == b'\x80' + gen = "G1" - return bundle_version, is_multi_tenant + return bundle_version, is_multi_tenant, gen @staticmethod def uri_is_http(uri): @@ -267,15 +304,16 @@ class WdcRedfishUtils(RedfishUtils): # Check the FW 
version in the bundle file, and compare it to what is already on the IOMs # Bundle version number - bundle_firmware_version, is_bundle_multi_tenant = self._get_bundle_version(bundle_uri) - if bundle_firmware_version is None or is_bundle_multi_tenant is None: + bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri) + if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None: return { 'ret': False, - 'msg': 'Unable to extract bundle version or multi-tenant status from update image tarfile' + 'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file' } + is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen() + # Verify that the bundle is correctly multi-tenant or not - is_enclosure_multi_tenant = self._is_enclosure_multi_tenant() if is_enclosure_multi_tenant != is_bundle_multi_tenant: return { 'ret': False, @@ -285,6 +323,16 @@ class WdcRedfishUtils(RedfishUtils): ) } + # Verify that the bundle is compliant with the target enclosure + if enclosure_gen != bundle_gen: + return { + 'ret': False, + 'msg': 'Enclosure generation is {0} but bundle is of {1}'.format( + enclosure_gen, + bundle_gen, + ) + } + # Version number installed on IOMs firmware_inventory = self.get_firmware_inventory() if not firmware_inventory["ret"]: diff --git a/tests/unit/plugins/modules/test_wdc_redfish_command.py b/tests/unit/plugins/modules/test_wdc_redfish_command.py index 332b976f70..0775ac73dd 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_command.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_command.py @@ -289,7 +289,7 @@ def mock_get_firmware_inventory_version_1_2_3(*args, **kwargs): } -ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status from update image tarfile" +ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status 
or generation from update image file" ACTION_WAS_SUCCESSFUL_MESSAGE = "Action was successful" From b0797d329cb0097b41283cc3f30bb9e556edcace Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 8 Jul 2024 19:45:24 +1200 Subject: [PATCH 152/482] CmdRunner guide (#8592) * initial commit * wip * WIP * progressing * remove copied part * progressing * document as_func() * complete docs about arg formatting * add PythonRunner documentation * fix markup * improve markup * multiple changes - revamp the cmd_runner_fmt doc - add CmdRunner instance and context parameters - changed formatting of other blocks * fix typo + reduce tables sizes * improve text * fixes and adjustments * fix sanity * fix version added --- .github/BOTMETA.yml | 6 +- docs/docsite/extra-docs.yml | 1 + docs/docsite/rst/guide_cmdrunner.rst | 463 +++++++++++++++++++++++++++ 3 files changed, 468 insertions(+), 2 deletions(-) create mode 100644 docs/docsite/rst/guide_cmdrunner.rst diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 17659231fb..62b094da75 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1468,14 +1468,16 @@ files: maintainers: ericzolf docs/docsite/rst/guide_alicloud.rst: maintainers: xiaozhu36 + docs/docsite/rst/guide_cmdrunner.rst: + maintainers: russoz + docs/docsite/rst/guide_deps.rst: + maintainers: russoz docs/docsite/rst/guide_online.rst: maintainers: remyleone docs/docsite/rst/guide_packet.rst: maintainers: baldwinSPC nurfet-becirevic t0mk teebes docs/docsite/rst/guide_scaleway.rst: maintainers: $team_scaleway - docs/docsite/rst/guide_deps.rst: - maintainers: russoz docs/docsite/rst/guide_vardict.rst: maintainers: russoz docs/docsite/rst/test_guide.rst: diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 3bed9e35fc..aebe82f42e 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -18,3 +18,4 @@ sections: toctree: - guide_deps - guide_vardict + - guide_cmdrunner diff 
--git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst new file mode 100644 index 0000000000..d4f12cf81e --- /dev/null +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -0,0 +1,463 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_cmdrunner: + + +Command Runner guide +==================== + + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the +``CmdRunner`` class to help execute external commands. The class is a wrapper around +the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization setting, +output processing, check mode, and other features. + +It is even more useful when one command is used in multiple modules, so that you can define all options +in a module util file, and each module uses the same runner with different arguments. + +For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to +Ansible module options, and the term *argument* when referring to the command line arguments for the external command. + + +Quickstart +"""""""""" + +``CmdRunner`` defines a command and a set of coded instructions on how to format +the command-line arguments, in which specific order, for a particular execution. +It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command. +There are other features, see more details throughout this document. + +To use ``CmdRunner`` you must start by creating an object. The example below is a simplified +version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`: + +.. 
code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + runner = CmdRunner( + module, + command="ansible-galaxy", + arg_formats=dict( + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), + ) + ) + +This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed: + +.. code-block:: python + + # Run the command with these arguments, when values exist for them + with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) + + # version is fixed, requires no value + with runner("version") as ctx: + dummy, stdout, dummy = ctx.run() + + # Another way of expressing it + dummy, stdout, dummy = runner("version").run() + +Note that you can pass values for the arguments when calling ``run()``, +otherwise ``CmdRunner`` uses the module options with the exact same names to +provide values for the runner arguments. If no value is passed and no module option +is found for the name specified, then an exception is raised, unless the +argument is using ``cmd_runner_fmt.as_fixed`` as format function like the +``version`` in the example above. See more about it below. + +In the first example, values of ``type``, ``force``, ``no_deps`` and others +are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are +passed explicitly. + +That generates a resulting command line similar to (example taken from the +output of an integration test): + +.. 
code-block:: python + + [ + "/bin/ansible-galaxy", + "collection", + "install", + "--upgrade", + "-p", + "", + "netbox.netbox", + ] + + +Argument formats +^^^^^^^^^^^^^^^^ + +As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats`` +defining how to format each CLI named argument. +An "argument format" is nothing but a function to transform the value of a variable +into something formatted for the command line. + + +Argument format function +"""""""""""""""""""""""" + +An ``arg_format`` function should be of the form: + +.. code-block:: python + + def func(value): + return ["--some-param-name", value] + +The parameter ``value`` can be of any type - although there are convenience +mechanisms to help handling sequence and mapping objects. + +The result is expected to be of the type ``Sequence[str]`` type (most commonly +``list[str]`` or ``tuple[str]``), otherwise it is considered to be a ``str``, +and it is coerced into ``list[str]``. +This resulting sequence of strings is added to the command line when that +argument is actually used. + +For example, if ``func`` returns: + +- ``["nee", 2, "shruberries"]``, the command line adds arguments ``"nee" "2" "shruberries"``. +- ``2 == 2``, the command line adds argument ``True``. +- ``None``, the command line adds argument ``None``. +- ``[]``, the command line adds no command line argument for that particular argument. + + +Convenience format methods +"""""""""""""""""""""""""" + +In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which +provides a set of convenience methods that return format functions for common cases. +In the first block of code in the `Quickstart`_ section you can see the importing of +that class: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object. 
+A description of each one of the convenience methods available and examples of how to use them is found below. +In these descriptions ``value`` refers to the single parameter passed to the formatting function. + +- ``cmd_runner_fmt.as_list()`` + This method does not receive any parameter, function returns ``value`` as-is. + + - Creation: + ``cmd_runner_fmt.as_list()`` + - Example: + +----------------------+---------------------+ + | Value | Outcome | + +======================+=====================+ + | ``["foo", "bar"]`` | ``["foo", "bar"]`` | + +----------------------+---------------------+ + | ``"foobar"`` | ``["foobar"]`` | + +----------------------+---------------------+ + +- ``cmd_runner_fmt.as_bool()`` + This method receives two different parameters: ``args_true`` and ``args_false``, latter being optional. + If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. + If the boolean evaluation is ``False``, then the function returns ``args_false`` + if it was provided, or ``[]`` otherwise. + + - Creation: + ``cmd_runner_fmt.as_bool("--force")`` + - Example: + +------------+--------------------+ + | Value | Outcome | + +============+====================+ + | ``True`` | ``["--force"]`` | + +------------+--------------------+ + | ``False`` | ``[]`` | + +------------+--------------------+ + +- ``cmd_runner_fmt.as_bool_not()`` + This method receives one parameter, which is returned by the function when the boolean evaluation + of ``value`` is ``False``. + + - Creation: + ``cmd_runner_fmt.as_bool_not("--no-deps")`` + - Example: + +-------------+---------------------+ + | Value | Outcome | + +=============+=====================+ + | ``True`` | ``[]`` | + +-------------+---------------------+ + | ``False`` | ``["--no-deps"]`` | + +-------------+---------------------+ + +- ``cmd_runner_fmt.as_optval()`` + This method receives one parameter ``arg``, the function returns the string concatenation + of ``arg`` and ``value``. 
+ + - Creation: + ``cmd_runner_fmt.as_optval("-i")`` + - Example: + +---------------+---------------------+ + | Value | Outcome | + +===============+=====================+ + | ``3`` | ``["-i3"]`` | + +---------------+---------------------+ + | ``foobar`` | ``["-ifoobar"]`` | + +---------------+---------------------+ + +- ``cmd_runner_fmt.as_opt_val()`` + This method receives one parameter ``arg``, the function returns ``[arg, value]``. + + - Creation: + ``cmd_runner_fmt.as_opt_val("--name")`` + - Example: + +--------------+--------------------------+ + | Value | Outcome | + +==============+==========================+ + | ``abc`` | ``["--name", "abc"]`` | + +--------------+--------------------------+ + +- ``cmd_runner_fmt.as_opt_eq_val()`` + This method receives one parameter ``arg``, the function returns the string of the form + ``{arg}={value}``. + + - Creation: + ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` + - Example: + +------------+-------------------------+ + | Value | Outcome | + +============+=========================+ + | ``10`` | ``["--num-cpus=10"]`` | + +------------+-------------------------+ + +- ``cmd_runner_fmt.as_fixed()`` + This method receives one parameter ``arg``, the function expects no ``value`` - if one + is provided then it is ignored. + The function returns ``arg`` as-is. + + - Creation: + ``cmd_runner_fmt.as_fixed("--version")`` + - Example: + +---------+-----------------------+ + | Value | Outcome | + +=========+=======================+ + | | ``["--version"]`` | + +---------+-----------------------+ + | 57 | ``["--version"]`` | + +---------+-----------------------+ + + - Note: + This is the only special case in which a value can be missing for the formatting function. + The example also comes from the code in `Quickstart`_. + In that case, the module has code to determine the command's version so that it can assert compatibility. + There is no *value* to be passed for that CLI argument. 
+- ``cmd_runner_fmt.as_map()`` + This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``. + The function returns the evaluation of ``arg[value]``. + If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``. + + - Creation: + ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)`` + - Example: + +---------------------+---------------+ + | Value | Outcome | + +=====================+===============+ + | ``"b"`` | ``["2"]`` | + +---------------------+---------------+ + | ``"yabadabadoo"`` | ``["42"]`` | + +---------------------+---------------+ + + - Note: + If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored. + +- ``cmd_runner_fmt.as_func()`` + This method receives one parameter ``arg`` which is itself a format function and it must abide by the rules described above. + + - Creation: + ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])`` + - Note: + The outcome for that depends entirely on the function provided by the developer. + + +Other features for argument formatting +"""""""""""""""""""""""""""""""""""""" + +Some additional features are available as decorators: + +- ``cmd_runner_fmt.unpack_args()`` + This decorator unpacks the incoming ``value`` as a list of elements. + + For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as: + + .. code-block:: python + + @cmd_runner_fmt.unpack_args + def execute_func(execute, manifest): + if execute: + return ["--execute", execute] + else: + return [manifest] + + runner = CmdRunner( + module, + command=_prepare_base_cmd(), + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + # ... + _execute=cmd_runner_fmt.as_func(execute_func), + # ... + ), + ) + + Then, in :ansplugin:`community.general.puppet#module` it is put to use with: + + .. 
code-block:: python + + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + +- ``cmd_runner_fmt.unpack_kwargs()`` + Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object. + +- ``cmd_runner_fmt.stack()`` + This decorator assumes ``value`` is a sequence and concatenates the output + of the wrapped function applied to each element of the sequence. + + For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database`` + is defined as: + + .. code-block:: python + + arg_formats = dict( + # ... + database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + # ... + ) + + When receiving a list ``["abc", "def"]``, the output is: + + .. code-block:: python + + ["--database", "abc", "--database", "def"] + + +Command Runner +^^^^^^^^^^^^^^ + +Settings that can be passed to the ``CmdRunner`` constructor are: + +- ``module: AnsibleModule`` + Module instance. Mandatory parameter. +- ``command: str | list[str]`` + Command to be executed. It can be a single string, the executable name, or a list + of strings containing the executable name as the first element and, optionally, fixed parameters. + Those parameters are used in all executions of the runner. +- ``arg_formats: dict`` + Mapping of argument names to formatting functions. +- ``default_args_order: str`` + As the name suggests, a default ordering for the arguments. When + this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``. +- ``check_rc: bool`` + When ``True``, if the return code from the command is not zero, the module exits + with an error. Defaults to ``False``. +- ``path_prefix: list[str]`` + If the command being executed is installed in a non-standard directory path, + additional paths might be provided to search for the executable. Defaults to ``None``. 
+- ``environ_update: dict`` + Pass additional environment variables to be set during the command execution. + Defaults to ``None``. +- ``force_lang: str`` + It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable. + Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``. + To disable this mechanism, set this parameter to ``None``. + In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect + that ``CmdRunner`` then tries to determine the best parseable locale for the runtime. + It should become the default value in the future, but for the time being the default value is ``C``. + +When creating a context, the additional settings that can be passed to the call are: + +- ``args_order: str`` + Establishes the order in which the arguments are rendered in the command line. + This parameter is mandatory unless ``default_args_order`` was provided to the runner instance. +- ``output_process: func`` + Function to transform the output of the executable into different values or formats. + See examples in section below. +- ``check_mode_skip: bool`` + Whether to skip the actual execution of the command when the module is in check mode. + Defaults to ``False``. +- ``check_mode_return: any`` + If ``check_mode_skip=True``, then return this value instead. + +Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior +might occur if redefining options already present in the runner or its context creation. Use with caution. + + +Processing results +^^^^^^^^^^^^^^^^^^ + +As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command, +and it passes the return value from that method back to caller. That means that, +by default, the result is going to be a tuple ``(rc, stdout, stderr)``. 
+ +If you need to transform or process that output, you can pass a function to the context, +as the ``output_process`` parameter. It must be a function like: + +.. code-block:: python + + def process(rc, stdout, stderr): + # do some magic + return processed_value # whatever that is + +In that case, the return of ``run()`` is the ``processed_value`` returned by the function. + + +PythonRunner +^^^^^^^^^^^^ + +The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of +Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner + from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + runner = PythonRunner( + module, + command=["-m", "django"], + arg_formats=dict(...), + python="python", + venv="/path/to/some/venv", + ) + +The default value for ``python`` is the string ``python``, and the for ``venv`` it is ``None``. + +The command line produced by such a command with ``python="python3.12"`` is something like: + +.. code-block:: shell + + /usr/bin/python3.12 -m django ... + +And the command line for ``venv="/work/venv"`` is like: + +.. code-block:: shell + + /work/venv/bin/python -m django ... + +You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name) +or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above. +See `Command line and environment `_ for more details. + +If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used +as-is, otherwise the runtime ``PATH`` is searched for that command name. + +Other than that, everything else works as in ``CmdRunner``. + +.. 
versionadded:: 4.8.0 From 8451fc36ca002f04802aebe46391e1928d8ba35a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 8 Jul 2024 22:45:05 +0200 Subject: [PATCH 153/482] Remove EOL'ed FreeBSD 13.2 from CI (#8607) Remove EOL'ed FreeBSD 13.2 from CI. Apparently the packages are no longer available. --- .azure-pipelines/azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 7c6470b8da..8db5107f4c 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -232,8 +232,8 @@ stages: test: rhel/9.2 - name: RHEL 8.8 test: rhel/8.8 - - name: FreeBSD 13.2 - test: freebsd/13.2 + # - name: FreeBSD 13.2 + # test: freebsd/13.2 groups: - 1 - 2 From e794fa74da56a66ac6a01eefaefcb2755e093336 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 11 Jul 2024 22:46:36 +0200 Subject: [PATCH 154/482] Reformat and re-order changelogs/changelog.yaml. --- changelogs/changelog.yaml | 1 + changelogs/config.yaml | 34 ++++++++++++++++++---------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 8d0ae9aa6d..5aa97d97e9 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,3 @@ +--- ancestor: 9.0.0 releases: {} diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 2cef6e26f4..32ffe27f2b 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -18,23 +18,25 @@ output_formats: prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - 
major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Community General trivial_section_name: trivial use_fqcn: true add_plugin_period: true +changelog_nice_yaml: true +changelog_sort: version From ca8ecb1df1544ba702a422b32f927778a80bd6b9 Mon Sep 17 00:00:00 2001 From: Mike Koreneff Date: Fri, 12 Jul 2024 22:08:43 +0100 Subject: [PATCH 155/482] redfish_utils: fix language check (#8613) * redfish_utils: fix language check * add fragment file * typo * improve words * improve words based on suggestion --------- Co-authored-by: Mike Koreneff --- changelogs/fragments/8613-redfish_utils-language.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8613-redfish_utils-language.yaml diff --git a/changelogs/fragments/8613-redfish_utils-language.yaml b/changelogs/fragments/8613-redfish_utils-language.yaml new file mode 100644 index 0000000000..1fc43c895d --- /dev/null +++ b/changelogs/fragments/8613-redfish_utils-language.yaml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 4240b9e4e7..edfc612466 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -3833,7 +3833,7 @@ class RedfishUtils(object): vendor = self._get_vendor()['Vendor'] rsp_uri = "" for loc in resp_data['Location']: - if loc['Language'] == "en": + if loc['Language'].startswith("en"): rsp_uri = loc['Uri'] if vendor == 'HPE': # WORKAROUND From 8990f97b45e69ce9709e03b5adb13ef3d968b3f5 Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Sat, 13 Jul 2024 13:32:36 +0200 Subject: [PATCH 156/482] New Plugins #8594 (#8595) * Implement #8594 * Fix lint and BOTMETA entries. * Fix BOTMETA * Consolidate argument check, code simplification, and formatting. Remove test vars. * Fix lint. * retrigger checks * Update plugins/plugin_utils/ansible_type.py Co-authored-by: Felix Fontein * Update plugins/test/ansible_type.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 6 + plugins/filter/reveal_ansible_type.py | 134 ++++++++++ plugins/plugin_utils/ansible_type.py | 47 ++++ plugins/test/ansible_type.py | 203 ++++++++++++++ .../filter_reveal_ansible_type/aliases | 5 + .../filter_reveal_ansible_type/tasks/main.yml | 7 + .../tasks/tasks.yml | 185 +++++++++++++ .../targets/test_ansible_type/aliases | 5 + .../targets/test_ansible_type/tasks/main.yml | 7 + .../targets/test_ansible_type/tasks/tasks.yml | 248 ++++++++++++++++++ 10 files changed, 847 insertions(+) create mode 100644 plugins/filter/reveal_ansible_type.py create mode 100644 plugins/plugin_utils/ansible_type.py create mode 100644 plugins/test/ansible_type.py create mode 100644 tests/integration/targets/filter_reveal_ansible_type/aliases create mode 100644 tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml create mode 100644 tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml create mode 100644 
tests/integration/targets/test_ansible_type/aliases create mode 100644 tests/integration/targets/test_ansible_type/tasks/main.yml create mode 100644 tests/integration/targets/test_ansible_type/tasks/tasks.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 62b094da75..9eb521018f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -176,6 +176,8 @@ files: maintainers: vbotka $filters/replace_keys.py: maintainers: vbotka + $filters/reveal_ansible_type.py: + maintainers: vbotka $filters/time.py: maintainers: resmo $filters/to_days.yml: @@ -1425,12 +1427,16 @@ files: ignore: matze labels: zypper maintainers: $team_suse + $plugin_utils/ansible_type.py: + maintainers: vbotka $plugin_utils/keys_filter.py: maintainers: vbotka $plugin_utils/unsafe.py: maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein + $tests/ansible_type.py: + maintainers: vbotka $tests/fqdn_valid.py: maintainers: vbotka ######################### diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py new file mode 100644 index 0000000000..916aaff930 --- /dev/null +++ b/plugins/filter/reveal_ansible_type.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: reveal_ansible_type + short_description: Return input type + version_added: "9.2.0" + author: Vladimir Botka (@vbotka) + description: This filter returns input type. + options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +''' + +EXAMPLES = ''' +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +# String. AnsibleUnicode. 
+data: "abc" +result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode + +# String. AnsibleUnicode alias str. +alias: {"AnsibleUnicode": "str"} +data: "abc" +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => str + +# List. All items are AnsibleUnicode. +data: ["a", "b", "c"] +result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] + +# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. +data: {"a": "foo", "b": "bar", "c": "baz"} +result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +result: '{{ "abc" | community.general.reveal_ansible_type }}' +# result => str + +# Integer +result: '{{ 123 | community.general.reveal_ansible_type }}' +# result => int + +# Float +result: '{{ 123.45 | community.general.reveal_ansible_type }}' +# result => float + +# Boolean +result: '{{ true | community.general.reveal_ansible_type }}' +# result => bool + +# List. All items are strings. +result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +# result => list[str] + +# List of dictionaries. +result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +# result => list[dict] + +# Dictionary. All keys are strings. All values are integers. +result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Dictionary. All keys are strings. All values are integers. +result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +# Dictionary. The keys are integers or strings. All values are strings. 
+alias: {"AnsibleUnicode": "str"}
+data: {1: 'a', 'b': 'b'}
+result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[int|str, str]
+
+# Dictionary. All keys are integers. All values are strings.
+alias: {"AnsibleUnicode": "str"}
+data: {1: 'a', 2: 'b'}
+result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[int, str]
+
+# Dictionary. All keys are strings. Multiple types values.
+alias: {"AnsibleUnicode": "str"}
+data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[str, bool|dict|float|int|list|str]
+
+# List. Multiple types items.
+alias: {"AnsibleUnicode": "str"}
+data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => list[bool|dict|float|int|list|str]
+'''
+
+RETURN = '''
+  _value:
+    description: Type of the data.
+ type: str +''' + +from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type + + +def reveal_ansible_type(data, alias=None): + """Returns data type""" + + return _ansible_type(data, alias) + + +class FilterModule(object): + + def filters(self): + return { + 'reveal_ansible_type': reveal_ansible_type + } diff --git a/plugins/plugin_utils/ansible_type.py b/plugins/plugin_utils/ansible_type.py new file mode 100644 index 0000000000..ab78b78927 --- /dev/null +++ b/plugins/plugin_utils/ansible_type.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common._collections_compat import Mapping + + +def _atype(data, alias): + """ + Returns the name of the type class. + """ + + data_type = type(data).__name__ + return alias.get(data_type, data_type) + + +def _ansible_type(data, alias): + """ + Returns the Ansible data type. + """ + + if alias is None: + alias = {} + + if not isinstance(alias, Mapping): + msg = "The argument alias must be a dictionary. 
%s is %s" + raise AnsibleFilterError(msg % (alias, type(alias))) + + data_type = _atype(data, alias) + + if data_type == 'list' and len(data) > 0: + items = [_atype(i, alias) for i in data] + items_type = '|'.join(sorted(set(items))) + return ''.join((data_type, '[', items_type, ']')) + + if data_type == 'dict' and len(data) > 0: + keys = [_atype(i, alias) for i in data.keys()] + vals = [_atype(i, alias) for i in data.values()] + keys_type = '|'.join(sorted(set(keys))) + vals_type = '|'.join(sorted(set(vals))) + return ''.join((data_type, '[', keys_type, ', ', vals_type, ']')) + + return data_type diff --git a/plugins/test/ansible_type.py b/plugins/test/ansible_type.py new file mode 100644 index 0000000000..9ac5e138eb --- /dev/null +++ b/plugins/test/ansible_type.py @@ -0,0 +1,203 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: ansible_type + short_description: Validate input type + version_added: "9.2.0" + author: Vladimir Botka (@vbotka) + description: This test validates input type. + options: + _input: + description: Input data. + type: raw + required: true + dtype: + description: A single data type, or a data types list to be validated. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +''' + +EXAMPLES = ''' + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +# String. AnsibleUnicode. +dtype: AnsibleUnicode +data: "abc" +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# String. AnsibleUnicode alias str. 
+alias: {"AnsibleUnicode": "str"} +dtype: str +data: "abc" +result: '{{ data is community.general.ansible_type(dtype, alias) }}' +# result => true + +# List. All items are AnsibleUnicode. +dtype: list[AnsibleUnicode] +data: ["a", "b", "c"] +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. +dtype: dict[AnsibleUnicode, AnsibleUnicode] +data: {"a": "foo", "b": "bar", "c": "baz"} +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +dtype: str +result: '{{ "abc" is community.general.ansible_type(dtype) }}' +# result => true + +# Integer +dtype: int +result: '{{ 123 is community.general.ansible_type(dtype) }}' +# result => true + +# Float +dtype: float +result: '{{ 123.45 is community.general.ansible_type(dtype) }}' +# result => true + +# Boolean +dtype: bool +result: '{{ true is community.general.ansible_type(dtype) }}' +# result => true + +# List. All items are strings. +dtype: list[str] +result: '{{ ["a", "b", "c"] is community.general.ansible_type(dtype) }}' +# result => true + +# List of dictionaries. +dtype: list[dict] +result: '{{ [{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are strings. All values are integers. +dtype: dict[str, int] +result: '{{ {"a": 1} is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are strings. All values are integers. +dtype: dict[str, int] +result: '{{ {"a": 1, "b": 2} is community.general.ansible_type(dtype) }}' +# result => true + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +# Dictionary. The keys are integers or strings. All values are strings. 
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[int|str, str]
+data: {1: 'a', 'b': 'b'}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Dictionary. All keys are integers. All values are strings.
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[int, str]
+data: {1: 'a', 2: 'b'}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Dictionary. All keys are strings. Multiple types values.
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[str, bool|dict|float|int|list|str]
+data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# List. Multiple types items.
+alias: {"AnsibleUnicode": "str"}
+dtype: list[bool|dict|float|int|list|str]
+data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Option dtype is list
+# --------------------
+
+# AnsibleUnicode or str
+dtype: ['AnsibleUnicode', 'str']
+data: abc
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# float or int
+dtype: ['float', 'int']
+data: 123
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# float or int
+dtype: ['float', 'int']
+data: 123.45
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# Multiple alias
+# --------------
+
+# int alias number
+alias: {"int": "number", "float": "number"}
+dtype: number
+data: 123
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# float alias number
+alias: {"int": "number", "float": "number"}
+dtype: number
+data: 123.45
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+'''
+
+RETURN = '''
+  _value:
+    description: Whether the data type is valid.
+    type: bool
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type
+
+
+def ansible_type(data, dtype, alias=None):
+    """Validates data type"""
+
+    if not isinstance(dtype, Sequence):
+        msg = "The argument dtype must be a string or a list. %s is %s."
+        raise AnsibleFilterError(msg % (dtype, type(dtype)))
+
+    if isinstance(dtype, str):
+        data_types = [dtype]
+    else:
+        data_types = dtype
+
+    return _ansible_type(data, alias) in data_types
+
+
+class TestModule(object):
+
+    def tests(self):
+        return {
+            'ansible_type': ansible_type
+        }
diff --git a/tests/integration/targets/filter_reveal_ansible_type/aliases b/tests/integration/targets/filter_reveal_ansible_type/aliases
new file mode 100644
index 0000000000..12d1d6617e
--- /dev/null
+++ b/tests/integration/targets/filter_reveal_ansible_type/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml
new file mode 100644
index 0000000000..c890c11901
--- /dev/null
+++ b/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Integration tests
+  import_tasks: tasks.yml
diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml
new file mode 100644
index 0000000000..37d3abcb71
--- /dev/null
+++
b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml @@ -0,0 +1,185 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +- name: String. AnsibleUnicode. + assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'AnsibleUnicode' + +- name: String. AnsibleUnicode alias str. + assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: 'str' + +- name: List. All items are AnsibleUnicode. + assert: + that: result == dtype + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'list[AnsibleUnicode]' + +- name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. + assert: + that: result == dtype + success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' + fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + +# No substitution and no alias. 
Type of strings is str +# ---------------------------------------------------- + +- name: String + assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ "abc" | community.general.reveal_ansible_type }}' + dtype: str + +- name: Integer + assert: + that: result == dtype + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123 | community.general.reveal_ansible_type }}' + dtype: int + +- name: Float + assert: + that: result == dtype + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123.45 | community.general.reveal_ansible_type }}' + dtype: float + +- name: Boolean + assert: + that: result == dtype + success_msg: 'true is {{ dtype }}' + fail_msg: 'true is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ true | community.general.reveal_ansible_type }}' + dtype: bool + +- name: List. All items are strings. + assert: + that: result == dtype + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' + dtype: list[str] + +- name: List of dictionaries. + assert: + that: result == dtype + success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' + dtype: list[dict] + +- name: Dictionary. All keys are strings. All values are integers. 
+ assert: + that: result == dtype + success_msg: '{"a": 1} is {{ dtype }}' + fail_msg: '{"a": 1} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +- name: Dictionary. All keys are strings. All values are integers. + assert: + that: result == dtype + success_msg: '{"a": 1, "b": 2} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +- name: Dictionary. The keys are integers or strings. All values are strings. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int|str, str] + +- name: Dictionary. All keys are integers. All values are keys. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int, str] + +- name: Dictionary. All keys are strings. Multiple types values. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[str, bool|dict|float|int|list|str] + +- name: List. Multiple types items. 
+ assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: list[bool|dict|float|int|list|str] diff --git a/tests/integration/targets/test_ansible_type/aliases b/tests/integration/targets/test_ansible_type/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/test_ansible_type/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/test_ansible_type/tasks/main.yml b/tests/integration/targets/test_ansible_type/tasks/main.yml new file mode 100644 index 0000000000..c890c11901 --- /dev/null +++ b/tests/integration/targets/test_ansible_type/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Integration tests + import_tasks: tasks.yml diff --git a/tests/integration/targets/test_ansible_type/tasks/tasks.yml b/tests/integration/targets/test_ansible_type/tasks/tasks.yml new file mode 100644 index 0000000000..261256c0d4 --- /dev/null +++ b/tests/integration/targets/test_ansible_type/tasks/tasks.yml @@ -0,0 +1,248 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +- name: String. AnsibleUnicode. 
+ assert: + that: data is community.general.ansible_type(dtype) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'AnsibleUnicode' + +- name: String. AnsibleUnicode alias str. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: 'str' + +- name: List. All items are AnsibleUnicode. + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'list[AnsibleUnicode]' + +- name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' + fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + +# No substitution and no alias. 
Type of strings is str +# ---------------------------------------------------- + +- name: String + assert: + that: '"abc" is community.general.ansible_type(dtype)' + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ "abc" | community.general.reveal_ansible_type }}' + dtype: str + +- name: Integer + assert: + that: '123 is community.general.ansible_type(dtype)' + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123 | community.general.reveal_ansible_type }}' + dtype: int + +- name: Float + assert: + that: '123.45 is community.general.ansible_type(dtype)' + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123.45 | community.general.reveal_ansible_type }}' + dtype: float + +- name: Boolean + assert: + that: 'true is community.general.ansible_type(dtype)' + success_msg: 'true is {{ dtype }}' + fail_msg: 'true is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ true | community.general.reveal_ansible_type }}' + dtype: bool + +- name: List. All items are strings. + assert: + that: '["a", "b", "c"] is community.general.ansible_type(dtype)' + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' + dtype: list[str] + +- name: List of dictionaries. + assert: + that: '[{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype)' + success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' + dtype: list[dict] + +- name: Dictionary. 
All keys are strings. All values are integers. + assert: + that: '{"a": 1} is community.general.ansible_type(dtype)' + success_msg: '{"a": 1} is {{ dtype }}' + fail_msg: '{"a": 1} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +- name: Dictionary. All keys are strings. All values are integers. + assert: + that: '{"a": 1, "b": 2} is community.general.ansible_type(dtype)' + success_msg: '{"a": 1, "b": 2} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +- name: Dictionary. The keys are integers or strings. All values are strings. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"1": "a", "b": "b"} is {{ dtype }}' + fail_msg: '{"1": "a", "b": "b"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int|str, str] + +- name: Dictionary. All keys are integers. All values are keys. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"1": "a", "2": "b"} is {{ dtype }}' + fail_msg: '{"1": "a", "2": "b"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int, str] + +- name: Dictionary. All keys are strings. Multiple types values. 
+ assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[str, bool|dict|float|int|list|str] + +- name: List. Multiple types items. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ dtype }}' + fail_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: list[bool|dict|float|int|list|str] + +# Option dtype is list +# -------------------- + +- name: AnsibleUnicode or str + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: abc + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['AnsibleUnicode', 'str'] + +- name: float or int + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: 123 + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['float', 'int'] + +- name: float or int + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '123.45 is {{ dtype }}' + 
fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: 123.45 + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['float', 'int'] + +# Multiple alias +# -------------- + +- name: int alias number + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"int": "number", "float": "number"} + data: 123 + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: number + +- name: float alias number + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"int": "number", "float": "number"} + data: 123.45 + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: number From 9f3103e89176726413e0911605d4919ef6097fb6 Mon Sep 17 00:00:00 2001 From: Daskan Date: Sun, 14 Jul 2024 12:05:53 +0200 Subject: [PATCH 157/482] Add example to rpm_ostree_pkg (#8556) * Update rpm_ostree_pkg.py expand examples list with 'until' example * Apply suggestions from code review. --------- Co-authored-by: Felix Fontein --- plugins/modules/rpm_ostree_pkg.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py index 826c33f2d1..1a02b2d71c 100644 --- a/plugins/modules/rpm_ostree_pkg.py +++ b/plugins/modules/rpm_ostree_pkg.py @@ -55,6 +55,17 @@ EXAMPLES = r''' community.general.rpm_ostree_pkg: name: nfs-utils state: absent + +# In case a different transaction is currently running the module would fail. 
+# Adding a delay can help mitigate this problem:
+- name: Install overlay package
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: present
+  register: rpm_ostree_pkg
+  until: rpm_ostree_pkg is not failed
+  retries: 10
+  delay: 30
 '''
 
 RETURN = r'''
From 21b16c1c77f732e2c763138ffe03ac80a3897214 Mon Sep 17 00:00:00 2001
From: lyrandy <42095565+lyrandy@users.noreply.github.com>
Date: Sun, 14 Jul 2024 06:06:37 -0400
Subject: [PATCH 158/482] Update VirtualBox Group parsing to align with documentation. (#8510)

* Update VirtualBox Group parsing to align with documentation.

Previously, we could separate the group string on the `/` char and consider each element to be distinct, top-level groups.

This change implements the notion of nested groups and the use of the `,` char to split multiple groups.

* Address code review comments.

Changed the implementation from a breaking change to a minor change by introducing a new parameter to configure the behaviour.

Keep the default values to maintain the existing behaviour, and allow consumers an option to opt-in.

* Fix line length.

The long lines were tripping CI. Reduce the length.

* Apply suggestions from code review

Update documentation to match expected conventions and correct the final rendered formatting.

Set the initial parent_group to `None` instead of `all` and rely on the parent class' inventory reconciliation logic to ensure consistent behaviour across different inventory plugins.

Co-authored-by: Felix Fontein

* Reword module arg description to avoid issues with CI.

One of the lines ended with a colon character which made the CI tests fail since it would interpret it as a YAML key.

Reworded the description altogether to avoid that issue.
* Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8508-virtualbox-inventory.yml | 3 + plugins/inventory/virtualbox.py | 85 +++++++++++++++++-- 2 files changed, 80 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8508-virtualbox-inventory.yml diff --git a/changelogs/fragments/8508-virtualbox-inventory.yml b/changelogs/fragments/8508-virtualbox-inventory.yml new file mode 100644 index 0000000000..dd14818331 --- /dev/null +++ b/changelogs/fragments/8508-virtualbox-inventory.yml @@ -0,0 +1,3 @@ +minor_changes: + - >- + virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510). \ No newline at end of file diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 79b04ec722..425ed91642 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -14,6 +14,8 @@ DOCUMENTATION = ''' - Get inventory hosts from the local virtualbox installation. - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). - The inventory_hostname is always the 'Name' of the virtualbox instance. + - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter. + - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation for details. 
extends_documentation_fragment: - constructed - inventory_cache @@ -35,6 +37,19 @@ DOCUMENTATION = ''' description: create vars from virtualbox properties type: dictionary default: {} + enable_advanced_group_parsing: + description: + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based on the V(/) character and + assign the resulting list elements as an Ansible Group. + - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups according to + U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). + Groups are now split using the V(,) character, and the V(/) character indicates nested groups. + - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") will result in + the group C(TestGroup2) being a child group of C(TestGroup); and + the VM being a part of C(TestGroup2) and C(TestGroup3). + default: false + type: bool + version_added: 9.2.0 ''' EXAMPLES = ''' @@ -177,14 +192,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # found groups elif k == 'Groups': - for group in v.split('/'): - if group: - group = make_unsafe(group) - group = self.inventory.add_group(group) - self.inventory.add_child(group, current_host) - if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + if self.get_option('enable_advanced_group_parsing'): + self._handle_vboxmanage_group_string(v, current_host, cacheable_results) + else: + self._handle_group_string(v, current_host, cacheable_results) continue else: @@ -227,6 +238,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return all(find_host(host, inventory)) + def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage 
according to this inventory's initial implementation.''' + # The original implementation of this inventory plugin treated `/` as + # a delimeter to split and use as Ansible Groups. + for group in vboxmanage_group.split('/'): + if group: + group = make_unsafe(group) + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + + def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + # Per the VirtualBox documentation, a VM can be part of many groups, + # and it's possible to have nested groups. + # Many groups are separated by commas ",", and nested groups use + # slash "/". + # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups + # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" + # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" + + for group in vboxmanage_group.split(','): + if not group: + # We could get an empty element due how to split works, and + # possible assignments from VirtualBox. e.g. ,/Group1 + continue + + if group == "/": + # This is the "root" group. We get here if the VM was not + # assigned to a particular group. Consider the host to be + # unassigned to a group. + continue + + parent_group = None + for subgroup in group.split('/'): + if not subgroup: + # Similarly to above, we could get an empty element. + # e.g //Group1 + continue + + if subgroup == '/': + # "root" group. 
+ # Consider the host to be unassigned + continue + + subgroup = make_unsafe(subgroup) + subgroup = self.inventory.add_group(subgroup) + if parent_group is not None: + self.inventory.add_child(parent_group, subgroup) + self.inventory.add_child(subgroup, current_host) + if subgroup not in cacheable_results: + cacheable_results[subgroup] = {'hosts': []} + cacheable_results[subgroup]['hosts'].append(current_host) + + parent_group = subgroup + def verify_file(self, path): valid = False From 6cefde622cac8bf0d20203dd32e0e8fd96ba68fe Mon Sep 17 00:00:00 2001 From: JL Euler Date: Sun, 14 Jul 2024 12:07:05 +0200 Subject: [PATCH 159/482] Improve Proxmox volume handling (#8542) * proxmox: basic linting using black via trunk.io * proxmox: refactor mount handling (#8407) - make mount creation idempotent: Mounts created using the special syntax ":" no longer create a new volume each time - add new keys for easier mount creation & management * proxmox: add changelog fragment * proxmox(fix): fix occasional syntax error * Update changelogs/fragments/8542-fix-proxmox-volume-handling.yml Link to pull request Co-authored-by: Felix Fontein * Update documentation - Fix options defined as values - Document mutual exclusivity - Fix option hierarchy - Add version_added tag * Revert "proxmox: basic linting" This reverts commit ca7214f60e7b517fa681089ee55ab0a1fed44fd4. 
* proxmox: Fix documentation * Fix list identifier in documentation * pass volume options as dict instead of list * Update plugins/modules/proxmox.py Update documentation wording Co-authored-by: Felix Fontein * Update plugins/modules/proxmox.py Update documentation wording Co-authored-by: Felix Fontein * proxmox: ensure values of `disk_volume` and `mount_volumes.*` dicts are strings * proxmox(fix): correct indentation * Apply suggestions from code review: punctuation Add suggested punctuation to documentation Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update plugins/modules/proxmox.py: vol_string building Accept suggested review change Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * proxmox: Use better string check and conversion --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .../8542-fix-proxmox-volume-handling.yml | 5 + plugins/modules/proxmox.py | 328 +++++++++++++++++- 2 files changed, 318 insertions(+), 15 deletions(-) create mode 100644 changelogs/fragments/8542-fix-proxmox-volume-handling.yml diff --git a/changelogs/fragments/8542-fix-proxmox-volume-handling.yml b/changelogs/fragments/8542-fix-proxmox-volume-handling.yml new file mode 100644 index 0000000000..9b982c0aeb --- /dev/null +++ b/changelogs/fragments/8542-fix-proxmox-volume-handling.yml @@ -0,0 +1,5 @@ +bugfixes: + - proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542). +minor_changes: + - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542). + - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542). 
diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 73afd952e2..67a67aec55 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -49,8 +49,44 @@ options: comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - Should not be used in conjunction with O(storage). + - This option is mutually exclusive with O(storage) and O(disk_volume). type: str + disk_volume: + description: + - Specify a hash/dictionary of the C(rootfs) disk. + - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. + - This option is mutually exclusive with O(storage) and O(disk). + type: dict + version_added: 9.2.0 + suboptions: + storage: + description: + - O(disk_volume.storage) is the storage identifier of the storage to use for the C(rootfs). + - Mutually exclusive with O(disk_volume.host_path). + type: str + volume: + description: + - O(disk_volume.volume) is the name of an existing volume. + - If not defined, the module will check if one exists. If not, a new volume will be created. + - If defined, the volume must exist under that name. + - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + type: str + size: + description: + - O(disk_volume.size) is the size of the storage to use. + - The size is given in GB. + - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + type: int + host_path: + description: + - O(disk_volume.host_path) defines a bind or device path on the PVE host to use for the C(rootfs). + - Mutually exclusive with O(disk_volume.storage), O(disk_volume.volume), and O(disk_volume.size). + type: path + options: + description: + - O(disk_volume.options) is a dict of extra options. 
+ - The value of any given option must be a string, for example V("1"). + type: dict cores: description: - Specify number of cores per socket. @@ -89,8 +125,56 @@ options: version_added: 8.5.0 mounts: description: - - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points + - Specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points as strings. + - This Option is mutually exclusive with O(mount_volumes). type: dict + mount_volumes: + description: + - Specify additional mounts (separate disks) for the container. As a hash/dictionary defining mount points. + - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. + - This Option is mutually exclusive with O(mounts). + type: list + elements: dict + version_added: 9.2.0 + suboptions: + id: + description: + - O(mount_volumes[].id) is the identifier of the mount point written as C(mp[n]). + type: str + required: true + storage: + description: + - O(mount_volumes[].storage) is the storage identifier of the storage to use. + - Mutually exclusive with O(mount_volumes[].host_path). + type: str + volume: + description: + - O(mount_volumes[].volume) is the name of an existing volume. + - If not defined, the module will check if one exists. If not, a new volume will be created. + - If defined, the volume must exist under that name. + - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). + type: str + size: + description: + - O(mount_volumes[].size) is the size of the storage to use. + - The size is given in GB. + - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). + type: int + host_path: + description: + - O(mount_volumes[].host_path) defines a bind or device path on the PVE host to use for the C(rootfs). 
+ - Mutually exclusive with O(mount_volumes[].storage), O(mount_volumes[].volume), and O(mount_volumes[].size). + type: path + mountpoint: + description: + - O(mount_volumes[].mountpoint) is the mount point of the volume. + type: path + required: true + options: + description: + - O(mount_volumes[].options) is a dict of extra options. + - The value of any given option must be a string, for example V("1"). + type: dict ip_address: description: - specifies the address the container will be assigned @@ -101,8 +185,8 @@ options: type: bool storage: description: - - target storage - - Should not be used in conjunction with O(disk). + - Target storage. + - This Option is mutually exclusive with O(disk) and O(disk_volume). type: str default: 'local' ostype: @@ -248,6 +332,20 @@ EXAMPLES = r''' ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' disk: 'local-lvm:20' +- name: Create new container with minimal options specifying disk storage location and size via disk_volume + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + disk_volume: + storage: local + size: 20 + - name: Create new container with hookscript and description community.general.proxmox: vmid: 100 @@ -329,6 +427,22 @@ EXAMPLES = r''' ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' mounts: '{"mp0":"local:8,mp=/mnt/test/"}' +- name: Create new container with minimal options defining a mount with 8GB using mount_volumes + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + mount_volumes: + - id: mp0 + storage: local + size: 8 + mountpoint: /mnt/test + - name: Create new container with minimal options defining a cpu core limit community.general.proxmox: vmid: 100 @@ -476,7 
+590,9 @@ import time from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six import string_types +from ansible.module_utils.common.text.converters import to_native, to_text + from ansible_collections.community.general.plugins.module_utils.proxmox import ( ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) @@ -501,6 +617,124 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): msg="Updating configuration is only supported for LXC enabled proxmox clusters.", ) + def parse_disk_string(disk_string): + # Example strings: + # "acl=0,thin1:base-100-disk-1,size=8G" + # "thin1:10,backup=0" + # "local:20" + # "volume=local-lvm:base-100-disk-1,size=20G" + # "/mnt/bindmounts/shared,mp=/shared" + # "volume=/dev/USB01,mp=/mnt/usb01" + args = disk_string.split(",") + # If the volume is not explicitly defined but implicit by only passing a key, + # add the "volume=" key prefix for ease of parsing. + args = ["volume=" + arg if "=" not in arg else arg for arg in args] + # Then create a dictionary from the arguments + disk_kwargs = dict(map(lambda item: item.split("="), args)) + + VOLUME_PATTERN = r"""(?x) + (?:(?P[\w\-.]+): + (?:(?P\d+)| + (?P[^,\s]+)) + )| + (?P[^,\s]+) + """ + # DISCLAIMER: + # There are two things called a "volume": + # 1. The "volume" key which describes the storage volume, device or directory to mount into the container. + # 2. The storage volume of a storage-backed mount point in the PVE storage sub system. + # In this section, we parse the "volume" key and check which type of mount point we are dealing with. 
+ pattern = re.compile(VOLUME_PATTERN) + match_dict = pattern.match(disk_kwargs.pop("volume")).groupdict() + match_dict = {k: v for k, v in match_dict.items() if v is not None} + + if "storage" in match_dict and "volume" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["volume"] = match_dict["volume"] + elif "storage" in match_dict and "size" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["size"] = match_dict["size"] + elif "host_path" in match_dict: + disk_kwargs["host_path"] = match_dict["host_path"] + + # Pattern matching only available in Python 3.10+ + # match match_dict: + # case {"storage": storage, "volume": volume}: + # disk_kwargs["storage"] = storage + # disk_kwargs["volume"] = volume + + # case {"storage": storage, "size": size}: + # disk_kwargs["storage"] = storage + # disk_kwargs["size"] = size + + # case {"host_path": host_path}: + # disk_kwargs["host_path"] = host_path + + return disk_kwargs + + def convert_mounts(mount_dict): + return_list = [] + for mount_key, mount_value in mount_dict.items(): + mount_config = parse_disk_string(mount_value) + return_list.append(dict(id=mount_key, **mount_config)) + + return return_list + + def build_volume( + key, + storage=None, + volume=None, + host_path=None, + size=None, + mountpoint=None, + options=None, + **kwargs + ): + if size is not None and isinstance(size, str): + size = size.strip("G") + # 1. 
Handle volume checks/creation + # 1.1 Check if defined volume exists + if volume is not None: + storage_content = self.get_storage_content(node, storage, vmid=vmid) + vol_ids = [vol["volid"] for vol in storage_content] + volid = "{storage}:{volume}".format(storage=storage, volume=volume) + if volid not in vol_ids: + self.module.fail_json( + changed=False, + msg="Storage {storage} does not contain volume {volume}".format( + storage=storage, + volume=volume, + ), + ) + vol_string = "{storage}:{volume},size={size}G".format( + storage=storage, volume=volume, size=size + ) + # 1.2 If volume not defined (but storage is), check if it exists + elif storage is not None: + api_node = self.proxmox_api.nodes( + node + ) # The node must exist, but not the LXC + try: + vol = api_node.lxc(vmid).get("config").get(key) + volume = parse_disk_string(vol).get("volume") + vol_string = "{storage}:{volume},size={size}G".format( + storage=storage, volume=volume, size=size + ) + + # If not, we have proxmox create one using the special syntax + except Exception: + vol_string = "{storage}:{size}".format(storage=storage, size=size) + + # 1.3 If we have a host_path, we don't have storage, a volume, or a size + vol_string = ",".join( + ([] if host_path is None else [host_path]) + + ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) + + ([] if options is None else [map("=".join, options.items())]) + + ([] if not kwargs else [map("=".join, kwargs.items())]) + ) + + return {key: vol_string} + # Version limited features minimum_version = {"tags": "6.1", "timezone": "6.3"} proxmox_node = self.proxmox_api.nodes(node) @@ -518,22 +752,35 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): ) # Remove all empty kwarg entries - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = dict((key, val) for key, val in kwargs.items() if val is not None) if cpus is not None: kwargs["cpulimit"] = cpus if disk is not None: - kwargs["rootfs"] = disk + kwargs["disk_volume"] = 
parse_disk_string(disk) + if "disk_volume" in kwargs: + if not all(isinstance(val, string_types) for val in kwargs["disk_volume"].values()): + self.module.warn("All disk_volume values must be strings. Converting non-string values to strings.") + kwargs["disk_volume"] = {key: to_text(val) for key, val in kwargs["disk_volume"].items()} + disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume")) + kwargs.update(disk_dict) if memory is not None: kwargs["memory"] = memory if swap is not None: kwargs["swap"] = swap if "netif" in kwargs: - kwargs.update(kwargs["netif"]) - del kwargs["netif"] + kwargs.update(kwargs.pop("netif")) if "mounts" in kwargs: - kwargs.update(kwargs["mounts"]) - del kwargs["mounts"] + kwargs["mount_volumes"] = convert_mounts(kwargs.pop("mounts")) + if "mount_volumes" in kwargs: + mounts_list = kwargs.pop("mount_volumes") + for mount_config in mounts_list: + if not all(isinstance(val, string_types) for val in mount_config.values()): + self.module.warn("All mount_volumes values must be strings. 
Converting non-string values to strings.") + mount_config = {key: to_text(val) for key, val in mount_config.items()} + key = mount_config.pop("id") + mount_dict = build_volume(key=key, **mount_config) + kwargs.update(mount_dict) # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string if "tags" in kwargs: re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$") @@ -735,12 +982,53 @@ def main(): hostname=dict(), ostemplate=dict(), disk=dict(type='str'), + disk_volume=dict( + type="dict", + options=dict( + storage=dict(type="str"), + volume=dict(type="str"), + size=dict(type="int"), + host_path=dict(type="path"), + options=dict(type="dict"), + ), + required_together=[("storage", "size")], + required_by={ + "volume": ("storage", "size"), + }, + mutually_exclusive=[ + ("host_path", "storage"), + ("host_path", "volume"), + ("host_path", "size"), + ], + ), cores=dict(type='int'), cpus=dict(type='int'), memory=dict(type='int'), swap=dict(type='int'), netif=dict(type='dict'), mounts=dict(type='dict'), + mount_volumes=dict( + type="list", + elements="dict", + options=dict( + id=(dict(type="str", required=True)), + storage=dict(type="str"), + volume=dict(type="str"), + size=dict(type="int"), + host_path=dict(type="path"), + mountpoint=dict(type="path", required=True), + options=dict(type="dict"), + ), + required_together=[("storage", "size")], + required_by={ + "volume": ("storage", "size"), + }, + mutually_exclusive=[ + ("host_path", "storage"), + ("host_path", "volume"), + ("host_path", "size"), + ], + ), ip_address=dict(), ostype=dict(default='auto', choices=[ 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged' @@ -776,11 +1064,17 @@ def main(): # either clone a container or create a new one from a template file. 
('state', 'present', ('clone', 'ostemplate', 'update'), True), ], - required_together=[ - ('api_token_id', 'api_token_secret') + required_together=[("api_token_id", "api_token_secret")], + required_one_of=[("api_password", "api_token_id")], + mutually_exclusive=[ + ( + "clone", + "ostemplate", + "update", + ), # Creating a new container is done either by cloning an existing one, or based on a template. + ("disk", "disk_volume", "storage"), + ("mounts", "mount_volumes"), ], - required_one_of=[('api_password', 'api_token_id')], - mutually_exclusive=[('clone', 'ostemplate', 'update')], # Creating a new container is done either by cloning an existing one, or based on a template. ) proxmox = ProxmoxLxcAnsible(module) @@ -821,7 +1115,9 @@ def main(): cores=module.params["cores"], hostname=module.params["hostname"], netif=module.params["netif"], + disk_volume=module.params["disk_volume"], mounts=module.params["mounts"], + mount_volumes=module.params["mount_volumes"], ip_address=module.params["ip_address"], onboot=ansible_to_proxmox_bool(module.params["onboot"]), cpuunits=module.params["cpuunits"], @@ -876,7 +1172,9 @@ def main(): hostname=module.params['hostname'], ostemplate=module.params['ostemplate'], netif=module.params['netif'], + disk_volume=module.params["disk_volume"], mounts=module.params['mounts'], + mount_volumes=module.params["mount_volumes"], ostype=module.params['ostype'], ip_address=module.params['ip_address'], onboot=ansible_to_proxmox_bool(module.params['onboot']), From 9dd2b71d043e89f9918fdf2ccda22674ef25a66a Mon Sep 17 00:00:00 2001 From: Art Win Date: Sun, 14 Jul 2024 13:59:12 +0200 Subject: [PATCH 160/482] nsupdate: fix 'index out of range' error when changing NS records (#8614) * nsupdate: fix 'index out of range' error when changing NS records * add clog fragment * Update changelogs/fragments/8614-nsupdate-index-out-of-range.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- 
changelogs/fragments/8614-nsupdate-index-out-of-range.yml | 2 ++ plugins/modules/nsupdate.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8614-nsupdate-index-out-of-range.yml diff --git a/changelogs/fragments/8614-nsupdate-index-out-of-range.yml b/changelogs/fragments/8614-nsupdate-index-out-of-range.yml new file mode 100644 index 0000000000..00b6f8b974 --- /dev/null +++ b/changelogs/fragments/8614-nsupdate-index-out-of-range.yml @@ -0,0 +1,2 @@ +bugfixes: + - "nsupdate - fix 'index out of range' error when changing NS records by falling back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614)." diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py index 63750165ca..c9a6ba2133 100644 --- a/plugins/modules/nsupdate.py +++ b/plugins/modules/nsupdate.py @@ -370,7 +370,8 @@ class RecordManager(object): except (socket_error, dns.exception.Timeout) as e: self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - entries_to_remove = [n.to_text() for n in lookup.answer[0].items if n.to_text() not in self.value] + lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0] + entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value] else: update.delete(self.module.params['record'], self.module.params['type']) From 83318c36aaf3f0e88362ac9d8a8d5ba7a4bfead2 Mon Sep 17 00:00:00 2001 From: leko Date: Sun, 14 Jul 2024 20:00:00 +0800 Subject: [PATCH 161/482] fix: sudosu not working on some BSD machines (#8214) * fix: sudosu not working on some BSD machines * fix: sudosu: added a flag (`alt_method`) to enhance compatibility with more versions of `su` * Update changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml Co-authored-by: Felix Fontein * Update plugins/become/sudosu.py 
Co-authored-by: Felix Fontein * Update plugins/become/sudosu.py Co-authored-by: Felix Fontein * Update plugins/become/sudosu.py Co-authored-by: Felix Fontein * Update plugins/become/sudosu.py Co-authored-by: Felix Fontein * fix: sudosu: lint --------- Co-authored-by: Felix Fontein --- ...udosu-not-working-on-some-BSD-machines.yml | 2 ++ plugins/become/sudosu.py | 20 ++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml diff --git a/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml b/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml new file mode 100644 index 0000000000..411ba8e868 --- /dev/null +++ b/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml @@ -0,0 +1,2 @@ +minor_changes: + - sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 60bb2aa517..2b009db4b7 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -55,6 +55,21 @@ DOCUMENTATION = """ ini: - section: sudo_become_plugin key: password + alt_method: + description: + - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), + it runs C(su -l user -c command). + - Use this when the default one is not working on your system. 
+ required: false + type: boolean + ini: + - section: community.general.sudosu + key: alternative_method + vars: + - name: ansible_sudosu_alt_method + env: + - name: ANSIBLE_SUDOSU_ALT_METHOD + version_added: 9.2.0 """ @@ -89,4 +104,7 @@ class BecomeModule(BecomeBase): if user: user = '%s' % (user) - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + if self.get_option('alt_method'): + return ' '.join([becomecmd, flags, prompt, "su -l", user, "-c", self._build_success_command(cmd, shell, True)]) + else: + return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) From a78f7b1e6a45b27ed6ff83235fd864b208ec1624 Mon Sep 17 00:00:00 2001 From: Daniel Poggenpohl Date: Sun, 14 Jul 2024 22:08:02 +0200 Subject: [PATCH 162/482] #8572 - Updated docs to include pip >= 20.3b1 requirement (#8634) * #8572 - Updated docs to include pip >= 20.3b1 requirement * Update plugins/modules/pip_package_info.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/pip_package_info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py index 6aea178cec..f7354e3678 100644 --- a/plugins/modules/pip_package_info.py +++ b/plugins/modules/pip_package_info.py @@ -27,6 +27,7 @@ options: type: list elements: path requirements: + - pip >= 20.3b1 (necessary for the C(--format) option) - The requested pip executables must be installed on the target. 
author: - Matthew Jones (@matburt) From 3a24aa5b6d92589a655e37113653c312d7bf72d8 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 15 Jul 2024 08:52:01 +0200 Subject: [PATCH 163/482] Bump next expected version after release (#8641) --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 954334d918..57232d9e56 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.2.0 +version: 9.3.0 readme: README.md authors: - Ansible (https://github.com/ansible) From a24ee93f23f4653a9b5271107c2cc1c4b9489804 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Jul 2024 21:02:49 +0200 Subject: [PATCH 164/482] Type options of become plugins (#8623) Type options of become plugins. --- changelogs/fragments/8623-become-types.yml | 2 ++ plugins/become/doas.py | 18 ++++++++++++------ plugins/become/dzdo.py | 12 ++++++++---- plugins/become/ksu.py | 18 ++++++++++++------ plugins/become/machinectl.py | 12 ++++++++---- plugins/become/pbrun.py | 14 +++++++++----- plugins/become/pfexec.py | 16 ++++++++++------ plugins/become/pmrun.py | 7 +++++-- plugins/become/sesu.py | 12 ++++++++---- plugins/become/sudosu.py | 3 +++ 10 files changed, 77 insertions(+), 37 deletions(-) create mode 100644 changelogs/fragments/8623-become-types.yml diff --git a/changelogs/fragments/8623-become-types.yml b/changelogs/fragments/8623-become-types.yml new file mode 100644 index 0000000000..c38e67eca1 --- /dev/null +++ b/changelogs/fragments/8623-become-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623)." 
diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 69e730aad4..761e5e1e95 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string ini: - section: privilege_escalation key: become_user @@ -26,7 +27,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_DOAS_USER become_exe: - description: Doas executable + description: Doas executable. + type: string default: doas ini: - section: privilege_escalation @@ -40,7 +42,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_DOAS_EXE become_flags: - description: Options to pass to doas + description: Options to pass to doas. + type: string default: '' ini: - section: privilege_escalation @@ -54,7 +57,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_DOAS_FLAGS become_pass: - description: password for doas prompt + description: Password for doas prompt. + type: string required: false vars: - name: ansible_become_password @@ -68,8 +72,10 @@ DOCUMENTATION = ''' key: password prompt_l10n: description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one + - List of localized strings to match for prompt detection. + - If empty we will use the built in one. + type: list + elements: string default: [] ini: - section: doas_become_plugin diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index a358e84e39..d94c684d1f 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. 
+ type: string ini: - section: privilege_escalation key: become_user @@ -26,7 +27,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_DZDO_USER become_exe: - description: Dzdo executable + description: Dzdo executable. + type: string default: dzdo ini: - section: privilege_escalation @@ -40,7 +42,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_DZDO_EXE become_flags: - description: Options to pass to dzdo + description: Options to pass to dzdo. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -54,7 +57,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_DZDO_FLAGS become_pass: - description: Options to pass to dzdo + description: Options to pass to dzdo. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index fa2f66864a..2be1832dc2 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string ini: - section: privilege_escalation key: become_user @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_KSU_USER required: true become_exe: - description: Su executable + description: Su executable. + type: string default: ksu ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_KSU_EXE become_flags: - description: Options to pass to ksu + description: Options to pass to ksu. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_KSU_FLAGS become_pass: - description: ksu password + description: Ksu password. 
+ type: string required: false vars: - name: ansible_ksu_pass @@ -69,8 +73,10 @@ DOCUMENTATION = ''' key: password prompt_l10n: description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one + - List of localized strings to match for prompt detection. + - If empty we will use the built in one. + type: list + elements: string default: [] ini: - section: ksu_become_plugin diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index e2773ed6a5..a0467c2c36 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_MACHINECTL_USER become_exe: - description: Machinectl executable + description: Machinectl executable. + type: string default: machinectl ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_MACHINECTL_EXE become_flags: - description: Options to pass to machinectl + description: Options to pass to machinectl. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_MACHINECTL_FLAGS become_pass: - description: Password for machinectl + description: Password for machinectl. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index 7d1437191e..8a96b75797 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. 
+ type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_PBRUN_USER become_exe: - description: Sudo executable + description: Sudo executable. + type: string default: pbrun ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PBRUN_EXE become_flags: - description: Options to pass to pbrun + description: Options to pass to pbrun. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PBRUN_FLAGS become_pass: - description: Password for pbrun + description: Password for pbrun. + type: string required: false vars: - name: ansible_become_password @@ -68,7 +72,7 @@ DOCUMENTATION = ''' - section: pbrun_become_plugin key: password wrap_exe: - description: Toggle to wrap the command pbrun calls in 'shell -c' or not + description: Toggle to wrap the command pbrun calls in C(shell -c) or not. default: false type: bool ini: diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 2468a28a94..d48d622713 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -14,9 +14,10 @@ DOCUMENTATION = ''' options: become_user: description: - - User you 'become' to execute the task + - User you 'become' to execute the task. - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out, but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions. + type: string default: root ini: - section: privilege_escalation @@ -30,7 +31,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_PFEXEC_USER become_exe: - description: Sudo executable + description: Sudo executable. 
+ type: string default: pfexec ini: - section: privilege_escalation @@ -44,7 +46,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PFEXEC_EXE become_flags: - description: Options to pass to pfexec + description: Options to pass to pfexec. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -58,7 +61,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PFEXEC_FLAGS become_pass: - description: pfexec password + description: pfexec password. + type: string required: false vars: - name: ansible_become_password @@ -71,7 +75,7 @@ DOCUMENTATION = ''' - section: pfexec_become_plugin key: password wrap_exe: - description: Toggle to wrap the command pfexec calls in 'shell -c' or not + description: Toggle to wrap the command pfexec calls in C(shell -c) or not. default: false type: bool ini: @@ -82,7 +86,7 @@ DOCUMENTATION = ''' env: - name: ANSIBLE_PFEXEC_WRAP_EXECUTION notes: - - This plugin ignores O(become_user) as pfexec uses it's own C(exec_attr) to figure this out. + - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out. ''' from ansible.plugins.become import BecomeBase diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 74b633f09a..908c5e759d 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -14,6 +14,7 @@ DOCUMENTATION = ''' options: become_exe: description: Sudo executable + type: string default: pmrun ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PMRUN_EXE become_flags: - description: Options to pass to pmrun + description: Options to pass to pmrun. + type: string default: '' ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PMRUN_FLAGS become_pass: - description: pmrun password + description: pmrun password. 
+ type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 5958c1bfca..4dcb837e70 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: ansible (@nekonyuu) options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_SESU_USER become_exe: - description: sesu executable + description: sesu executable. + type: string default: sesu ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_SESU_EXE become_flags: - description: Options to pass to sesu + description: Options to pass to sesu. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_SESU_FLAGS become_pass: - description: Password to pass to sesu + description: Password to pass to sesu. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 2b009db4b7..5454fd2316 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -16,6 +16,7 @@ DOCUMENTATION = """ options: become_user: description: User you 'become' to execute the task. + type: string default: root ini: - section: privilege_escalation @@ -30,6 +31,7 @@ DOCUMENTATION = """ - name: ANSIBLE_SUDO_USER become_flags: description: Options to pass to C(sudo). + type: string default: -H -S -n ini: - section: privilege_escalation @@ -44,6 +46,7 @@ DOCUMENTATION = """ - name: ANSIBLE_SUDO_FLAGS become_pass: description: Password to pass to C(sudo). 
+ type: string required: false vars: - name: ansible_become_password From daed4dcc94ad729571d77c1ff4984b3fe1c35096 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Jul 2024 21:03:41 +0200 Subject: [PATCH 165/482] Type options of lookup plugins (#8626) Type options of lookup plugins. --- changelogs/fragments/8626-lookup-types.yml | 2 ++ plugins/lookup/chef_databag.py | 2 ++ plugins/lookup/consul_kv.py | 14 +++++++++++--- plugins/lookup/cyberarkpassword.py | 4 ++++ plugins/lookup/dsv.py | 5 +++++ plugins/lookup/etcd.py | 2 ++ plugins/lookup/filetree.py | 4 +++- plugins/lookup/hiera.py | 2 ++ plugins/lookup/onepassword.py | 2 ++ plugins/lookup/onepassword_doc.py | 2 ++ plugins/lookup/onepassword_raw.py | 2 ++ plugins/lookup/passwordstore.py | 3 ++- plugins/lookup/redis.py | 3 +++ plugins/lookup/shelvefile.py | 4 ++++ plugins/lookup/tss.py | 10 +++++++++- 15 files changed, 55 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8626-lookup-types.yml diff --git a/changelogs/fragments/8626-lookup-types.yml b/changelogs/fragments/8626-lookup-types.yml new file mode 100644 index 0000000000..b6ebf35748 --- /dev/null +++ b/changelogs/fragments/8626-lookup-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626)." 
diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index b14d924ae8..a116b21e5f 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -22,10 +22,12 @@ DOCUMENTATION = ''' name: description: - Name of the databag + type: string required: true item: description: - Item to fetch + type: string required: true ''' diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index f8aadadc19..79eb65edb1 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -29,13 +29,17 @@ DOCUMENTATION = ''' index: description: - If the key has a value with the specified index then this is returned allowing access to historical values. + type: int datacenter: description: - Retrieve the key from a consul datacenter other than the default for the consul host. + type: str token: description: The acl token to allow access to restricted values. + type: str host: default: localhost + type: str description: - The target to connect to, must be a resolvable address. - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set. @@ -46,22 +50,26 @@ DOCUMENTATION = ''' description: - The port of the target host to connect to. - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. + type: int default: 8500 scheme: default: http + type: str description: - Whether to use http or https. - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. validate_certs: default: true - description: Whether to verify the ssl connection or not. + description: Whether to verify the TLS connection or not. + type: bool env: - name: ANSIBLE_CONSUL_VALIDATE_CERTS ini: - section: lookup_consul key: validate_certs client_cert: - description: The client cert to verify the ssl connection. + description: The client cert to verify the TLS connection. 
+ type: str env: - name: ANSIBLE_CONSUL_CLIENT_CERT ini: @@ -94,7 +102,7 @@ EXAMPLES = """ - name: retrieving a KV from a remote cluster on non default port ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" + msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}" """ RETURN = """ diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index c3cc427df8..6a08675b3b 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -17,19 +17,23 @@ DOCUMENTATION = ''' options : _command: description: Cyberark CLI utility. + type: string env: - name: AIM_CLIPASSWORDSDK_CMD default: '/opt/CARKaim/sdk/clipasswordsdk' appid: description: Defines the unique ID of the application that is issuing the password request. + type: string required: true query: description: Describes the filter criteria for the password retrieval. + type: string required: true output: description: - Specifies the desired output fields separated by commas. - "They could be: Password, PassProps., PasswordChangeInProcess" + type: string default: 'password' _extra: description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 2dbb7db3ea..5e26c43af4 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -22,6 +22,7 @@ options: required: true tenant: description: The first format parameter in the default O(url_template). + type: string env: - name: DSV_TENANT ini: @@ -32,6 +33,7 @@ options: default: com description: The top-level domain of the tenant; the second format parameter in the default O(url_template). + type: string env: - name: DSV_TLD ini: @@ -40,6 +42,7 @@ options: required: false client_id: description: The client_id with which to request the Access Grant. 
+ type: string env: - name: DSV_CLIENT_ID ini: @@ -48,6 +51,7 @@ options: required: true client_secret: description: The client secret associated with the specific O(client_id). + type: string env: - name: DSV_CLIENT_SECRET ini: @@ -58,6 +62,7 @@ options: default: https://{}.secretsvaultcloud.{}/v1 description: The path to prepend to the base URL to form a valid REST API request. + type: string env: - name: DSV_URL_TEMPLATE ini: diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 5135e74877..1dec890b20 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -25,12 +25,14 @@ DOCUMENTATION = ''' url: description: - Environment variable with the URL for the etcd server + type: string default: 'http://127.0.0.1:4001' env: - name: ANSIBLE_ETCD_URL version: description: - Environment variable with the etcd protocol version + type: string default: 'v1' env: - name: ANSIBLE_ETCD_VERSION diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 2131de99a5..ee7bfe27b7 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -17,8 +17,10 @@ description: This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. options: _terms: - description: path(s) of files to read + description: Path(s) of files to read. required: true + type: list + elements: string ''' EXAMPLES = r""" diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index fa4d0a1999..02669c98dc 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -25,12 +25,14 @@ DOCUMENTATION = ''' executable: description: - Binary file to execute Hiera. + type: string default: '/usr/bin/hiera' env: - name: ANSIBLE_HIERA_BIN config_file: description: - File that describes the hierarchy of Hiera. 
+ type: string default: '/etc/hiera.yaml' env: - name: ANSIBLE_HIERA_CFG diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 8ca95de0bc..f9b8c6dfa3 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -23,6 +23,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. required: true + type: list + elements: string account_id: version_added: 7.5.0 domain: diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py index ab24795df2..789e51c35a 100644 --- a/plugins/lookup/onepassword_doc.py +++ b/plugins/lookup/onepassword_doc.py @@ -24,6 +24,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. required: true + type: list + elements: string extends_documentation_fragment: - community.general.onepassword diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index 3eef535a1c..dc3e590329 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -23,6 +23,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. required: true + type: list + elements: string account_id: version_added: 7.5.0 domain: diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 9814fe133b..a1b0842a7b 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -42,8 +42,9 @@ DOCUMENTATION = ''' default: false umask: description: - - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable). + - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable). - Note pass' default value is V('077'). 
+ type: string env: - name: PASSWORD_STORE_UMASK version_added: 1.3.0 diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 43b046a798..17cbf120e9 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -19,8 +19,11 @@ DOCUMENTATION = ''' options: _terms: description: list of keys to query + type: list + elements: string host: description: location of Redis host + type: string default: '127.0.0.1' env: - name: ANSIBLE_REDIS_HOST diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 35f1097c8b..70d18338e9 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -15,11 +15,15 @@ DOCUMENTATION = ''' options: _terms: description: Sets of key value pairs of parameters. + type: list + elements: str key: description: Key to query. + type: str required: true file: description: Path to shelve file. + type: path required: true ''' diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 80105ff715..f2d79ed168 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -25,7 +25,8 @@ options: _terms: description: The integer ID of the secret. required: true - type: int + type: list + elements: int secret_path: description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. required: false @@ -52,6 +53,7 @@ options: version_added: 7.0.0 base_url: description: The base URL of the server, for example V(https://localhost/SecretServer). + type: string env: - name: TSS_BASE_URL ini: @@ -60,6 +62,7 @@ options: required: true username: description: The username with which to request the OAuth2 Access Grant. + type: string env: - name: TSS_USERNAME ini: @@ -69,6 +72,7 @@ options: description: - The password associated with the supplied username. - Required when O(token) is not provided. + type: string env: - name: TSS_PASSWORD ini: @@ -80,6 +84,7 @@ options: - The domain with which to request the OAuth2 Access Grant. 
- Optional when O(token) is not provided. - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string env: - name: TSS_DOMAIN ini: @@ -92,6 +97,7 @@ options: - Existing token for Thycotic authorizer. - If provided, O(username) and O(password) are not needed. - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string env: - name: TSS_TOKEN ini: @@ -102,6 +108,7 @@ options: default: /api/v1 description: The path to append to the base URL to form a valid REST API request. + type: string env: - name: TSS_API_PATH_URI required: false @@ -109,6 +116,7 @@ options: default: /oauth2/token description: The path to append to the base URL to form a valid OAuth2 Access Grant request. + type: string env: - name: TSS_TOKEN_PATH_URI required: false From f9a56b9a9b5342eab0a102f7259585a143c329db Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Jul 2024 21:04:16 +0200 Subject: [PATCH 166/482] Type options of inventory plugins (#8625) Type options of inventory plugins. --- changelogs/fragments/8625-inventory-types.yml | 2 ++ plugins/inventory/cobbler.py | 4 ++++ plugins/inventory/linode.py | 2 ++ plugins/inventory/lxd.py | 3 ++- plugins/inventory/nmap.py | 4 +++- plugins/inventory/online.py | 2 ++ plugins/inventory/scaleway.py | 2 ++ plugins/inventory/stackpath_compute.py | 1 + plugins/inventory/virtualbox.py | 3 +++ 9 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8625-inventory-types.yml diff --git a/changelogs/fragments/8625-inventory-types.yml b/changelogs/fragments/8625-inventory-types.yml new file mode 100644 index 0000000000..a89352a230 --- /dev/null +++ b/changelogs/fragments/8625-inventory-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625)." 
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index cdef9944a0..664380da8f 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -21,20 +21,24 @@ DOCUMENTATION = ''' options: plugin: description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize it as it's own. + type: string required: true choices: [ 'cobbler', 'community.general.cobbler' ] url: description: URL to cobbler. + type: string default: 'http://cobbler/cobbler_api' env: - name: COBBLER_SERVER user: description: Cobbler authentication user. + type: string required: false env: - name: COBBLER_USER password: description: Cobbler authentication password. + type: string required: false env: - name: COBBLER_PASSWORD diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index e161e086e5..5c9a4718f5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -35,6 +35,7 @@ DOCUMENTATION = r''' version_added: 4.5.0 plugin: description: Marks this as an instance of the 'linode' plugin. + type: string required: true choices: ['linode', 'community.general.linode'] ip_style: @@ -47,6 +48,7 @@ DOCUMENTATION = r''' version_added: 3.6.0 access_token: description: The Linode account personal access token. + type: string required: true env: - name: LINODE_ACCESS_TOKEN diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index cf64f4ee8c..9ae004f6c5 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -20,6 +20,7 @@ DOCUMENTATION = r''' options: plugin: description: Token that ensures this is a source file for the 'lxd' plugin. + type: string required: true choices: [ 'community.general.lxd' ] url: @@ -27,8 +28,8 @@ DOCUMENTATION = r''' - The unix domain socket path or the https URL for the lxd server. - Sockets in filesystem have to start with C(unix:). - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). 
+ type: string default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str client_key: description: - The client certificate key file path. diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 2ca474a1ff..48f02c446b 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' options: plugin: description: token that ensures this is a source file for the 'nmap' plugin. + type: string required: true choices: ['nmap', 'community.general.nmap'] sudo: @@ -29,6 +30,7 @@ DOCUMENTATION = ''' type: boolean address: description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string required: true env: - name: ANSIBLE_NMAP_ADDRESS @@ -91,7 +93,7 @@ DOCUMENTATION = ''' default: true version_added: 7.4.0 notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). - 'TODO: add OS fingerprinting' ''' EXAMPLES = ''' diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 9355d9d414..70b8d14192 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -16,11 +16,13 @@ DOCUMENTATION = r''' options: plugin: description: token that ensures this is a source file for the 'online' plugin. + type: string required: true choices: ['online', 'community.general.online'] oauth_token: required: true description: Online OAuth token. + type: string env: # in order of precedence - name: ONLINE_TOKEN diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index dc24a17dab..4205caeca7 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -20,6 +20,7 @@ DOCUMENTATION = r''' plugin: description: Token that ensures this is a source file for the 'scaleway' plugin. 
required: true + type: string choices: ['scaleway', 'community.general.scaleway'] regions: description: Filter results on a specific Scaleway region. @@ -46,6 +47,7 @@ DOCUMENTATION = r''' - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). + type: string env: # in order of precedence - name: SCW_TOKEN diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 6b48a49f12..8508b4e797 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -24,6 +24,7 @@ DOCUMENTATION = ''' description: - A token that ensures this is a source file for the plugin. required: true + type: string choices: ['community.general.stackpath_compute'] client_id: description: diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 425ed91642..d48c294fd9 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -22,6 +22,7 @@ DOCUMENTATION = ''' options: plugin: description: token that ensures this is a source file for the 'virtualbox' plugin + type: string required: true choices: ['virtualbox', 'community.general.virtualbox'] running_only: @@ -30,8 +31,10 @@ DOCUMENTATION = ''' default: false settings_password_file: description: provide a file containing the settings password (equivalent to --settingspwfile) + type: string network_info_path: description: property path to query for network information (ansible_host) + type: string default: "/VirtualBox/GuestInfo/Net/0/V4/IP" query: description: create vars from virtualbox properties From ce65eb873695797375de01fb75011017e7a17d56 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Jul 2024 21:04:53 +0200 Subject: [PATCH 167/482] Type options of 
connection plugins (#8627) Type options of connection plugins. --- changelogs/fragments/8627-connection-types.yml | 2 ++ plugins/connection/chroot.py | 3 +++ plugins/connection/funcd.py | 1 + plugins/connection/incus.py | 4 ++++ plugins/connection/iocage.py | 2 ++ plugins/connection/jail.py | 2 ++ plugins/connection/lxc.py | 2 ++ plugins/connection/lxd.py | 4 ++++ plugins/connection/qubes.py | 8 +++++--- plugins/connection/zone.py | 3 ++- 10 files changed, 27 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8627-connection-types.yml diff --git a/changelogs/fragments/8627-connection-types.yml b/changelogs/fragments/8627-connection-types.yml new file mode 100644 index 0000000000..9b92735fb8 --- /dev/null +++ b/changelogs/fragments/8627-connection-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627)." diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 810316aaa5..3567912359 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' remote_addr: description: - The path of the chroot you want to access. + type: string default: inventory_hostname vars: - name: inventory_hostname @@ -27,6 +28,7 @@ DOCUMENTATION = ''' executable: description: - User specified executable shell + type: string ini: - section: defaults key: executable @@ -38,6 +40,7 @@ DOCUMENTATION = ''' chroot_exe: description: - User specified chroot binary + type: string ini: - section: chroot_connection key: exe diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 219a8cccd3..7765f53110 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' remote_addr: description: - The path of the chroot you want to access. 
+ type: string default: inventory_hostname vars: - name: ansible_host diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index 81d6f971c7..8adea2d13a 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -19,6 +19,7 @@ DOCUMENTATION = """ remote_addr: description: - The instance identifier. + type: string default: inventory_hostname vars: - name: inventory_hostname @@ -27,6 +28,7 @@ DOCUMENTATION = """ executable: description: - The shell to use for execution inside the instance. + type: string default: /bin/sh vars: - name: ansible_executable @@ -35,6 +37,7 @@ DOCUMENTATION = """ description: - The name of the Incus remote to use (per C(incus remote list)). - Remotes are used to access multiple servers from a single client. + type: string default: local vars: - name: ansible_incus_remote @@ -42,6 +45,7 @@ DOCUMENTATION = """ description: - The name of the Incus project to use (per C(incus project list)). - Projects are used to divide the instances running on a server. 
+ type: string default: default vars: - name: ansible_incus_project diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 2e2a6f0937..79d4f88594 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -20,12 +20,14 @@ DOCUMENTATION = ''' remote_addr: description: - Path to the jail + type: string vars: - name: ansible_host - name: ansible_iocage_host remote_user: description: - User to execute as inside the jail + type: string vars: - name: ansible_user - name: ansible_iocage_user diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 3a3edd4b18..7d0abdde3a 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' remote_addr: description: - Path to the jail + type: string default: inventory_hostname vars: - name: inventory_hostname @@ -28,6 +29,7 @@ DOCUMENTATION = ''' remote_user: description: - User to execute as inside the jail + type: string vars: - name: ansible_user - name: ansible_jail_user diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 7bb5824fac..2710e6984e 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -17,6 +17,7 @@ DOCUMENTATION = ''' remote_addr: description: - Container identifier + type: string default: inventory_hostname vars: - name: inventory_hostname @@ -26,6 +27,7 @@ DOCUMENTATION = ''' default: /bin/sh description: - Shell executable + type: string vars: - name: ansible_executable - name: ansible_lxc_executable diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 0e784b85fd..d850907182 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -19,6 +19,7 @@ DOCUMENTATION = ''' - Instance (container/VM) identifier. - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is used as the instance identifier. 
+ type: string default: inventory_hostname vars: - name: inventory_hostname @@ -27,6 +28,7 @@ DOCUMENTATION = ''' executable: description: - Shell to use for execution inside instance. + type: string default: /bin/sh vars: - name: ansible_executable @@ -34,6 +36,7 @@ DOCUMENTATION = ''' remote: description: - Name of the LXD remote to use. + type: string default: local vars: - name: ansible_lxd_remote @@ -41,6 +44,7 @@ DOCUMENTATION = ''' project: description: - Name of the LXD project to use. + type: string vars: - name: ansible_lxd_project version_added: 2.0.0 diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 25594e952b..b54eeb3a84 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -25,14 +25,16 @@ DOCUMENTATION = ''' options: remote_addr: description: - - vm name + - VM name. + type: string default: inventory_hostname vars: - name: ansible_host remote_user: description: - - The user to execute as inside the vm. - default: The *user* account as default in Qubes OS. + - The user to execute as inside the VM. + type: string + default: The I(user) account as default in Qubes OS. vars: - name: ansible_user # keyword: diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 34827c7e37..0a591143e0 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -16,11 +16,12 @@ DOCUMENTATION = ''' name: zone short_description: Run tasks in a zone instance description: - - Run commands or put/fetch files to an existing zone + - Run commands or put/fetch files to an existing zone. options: remote_addr: description: - Zone identifier + type: string default: inventory_hostname vars: - name: ansible_host From cac55beb4fe7e22f2d4d868fef3affbb854e558f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Jul 2024 21:05:34 +0200 Subject: [PATCH 168/482] Type options of callback plugins (#8628) Type options of callback plugins. 
--- changelogs/fragments/8628-callback-types.yml | 2 ++ plugins/callback/cgroup_memory_recap.py | 2 ++ plugins/callback/hipchat.py | 7 +++++++ plugins/callback/jabber.py | 4 ++++ plugins/callback/log_plays.py | 1 + plugins/callback/loganalytics.py | 2 ++ plugins/callback/logentries.py | 4 ++++ plugins/callback/logstash.py | 4 ++++ plugins/callback/slack.py | 3 +++ plugins/callback/splunk.py | 2 ++ plugins/callback/sumologic.py | 1 + plugins/callback/syslog_json.py | 3 +++ 12 files changed, 35 insertions(+) create mode 100644 changelogs/fragments/8628-callback-types.yml diff --git a/changelogs/fragments/8628-callback-types.yml b/changelogs/fragments/8628-callback-types.yml new file mode 100644 index 0000000000..c223a85985 --- /dev/null +++ b/changelogs/fragments/8628-callback-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628)." diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index d3961bf0c8..643f0f0b88 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -25,6 +25,7 @@ DOCUMENTATION = ''' max_mem_file: required: true description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). + type: str env: - name: CGROUP_MAX_MEM_FILE ini: @@ -33,6 +34,7 @@ DOCUMENTATION = ''' cur_mem_file: required: true description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). 
+ type: str env: - name: CGROUP_CUR_MEM_FILE ini: diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index afd9e20559..bf0d425303 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -25,6 +25,7 @@ DOCUMENTATION = ''' options: token: description: HipChat API token for v1 or v2 API. + type: str required: true env: - name: HIPCHAT_TOKEN @@ -33,6 +34,10 @@ DOCUMENTATION = ''' key: token api_version: description: HipChat API version, v1 or v2. + type: str + choices: + - v1 + - v2 required: false default: v1 env: @@ -42,6 +47,7 @@ DOCUMENTATION = ''' key: api_version room: description: HipChat room to post in. + type: str default: ansible env: - name: HIPCHAT_ROOM @@ -50,6 +56,7 @@ DOCUMENTATION = ''' key: room from: description: Name to post as + type: str default: ansible env: - name: HIPCHAT_FROM diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index d2d00496d8..302687b708 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -20,21 +20,25 @@ DOCUMENTATION = ''' options: server: description: connection info to jabber server + type: str required: true env: - name: JABBER_SERV user: description: Jabber user to authenticate as + type: str required: true env: - name: JABBER_USER password: description: Password for the user to the jabber server + type: str required: true env: - name: JABBER_PASS to: description: chat identifier that will receive the message + type: str required: true env: - name: JABBER_TO diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index e99054e176..daa88bcc11 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' log_folder: default: /var/log/ansible/hosts description: The folder where log files will be created. 
+ type: str env: - name: ANSIBLE_LOG_FOLDER ini: diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index ed7e47b2e2..fd1b2772c4 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' options: workspace_id: description: Workspace ID of the Azure log analytics workspace. + type: str required: true env: - name: WORKSPACE_ID @@ -29,6 +30,7 @@ DOCUMENTATION = ''' key: workspace_id shared_key: description: Shared key to connect to Azure log analytics workspace. + type: str required: true env: - name: WORKSPACE_SHARED_KEY diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index d3feceb72e..c1271543ad 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -22,6 +22,7 @@ DOCUMENTATION = ''' options: api: description: URI to the Logentries API. + type: str env: - name: LOGENTRIES_API default: data.logentries.com @@ -30,6 +31,7 @@ DOCUMENTATION = ''' key: api port: description: HTTP port to use when connecting to the API. + type: int env: - name: LOGENTRIES_PORT default: 80 @@ -38,6 +40,7 @@ DOCUMENTATION = ''' key: port tls_port: description: Port to use when connecting to the API when TLS is enabled. + type: int env: - name: LOGENTRIES_TLS_PORT default: 443 @@ -46,6 +49,7 @@ DOCUMENTATION = ''' key: tls_port token: description: The logentries C(TCP token). + type: str env: - name: LOGENTRIES_ANSIBLE_TOKEN required: true diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index f3725e465a..aa47ee4eb8 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -20,6 +20,7 @@ DOCUMENTATION = r''' options: server: description: Address of the Logstash server. + type: str env: - name: LOGSTASH_SERVER ini: @@ -29,6 +30,7 @@ DOCUMENTATION = r''' default: localhost port: description: Port on which logstash is listening. 
+ type: int env: - name: LOGSTASH_PORT ini: @@ -38,6 +40,7 @@ DOCUMENTATION = r''' default: 5000 type: description: Message type. + type: str env: - name: LOGSTASH_TYPE ini: @@ -47,6 +50,7 @@ DOCUMENTATION = r''' default: ansible pre_command: description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. + type: str version_added: 2.0.0 ini: - section: callback_logstash diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index e7a2743ec5..2a995992ee 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -22,6 +22,7 @@ DOCUMENTATION = ''' webhook_url: required: true description: Slack Webhook URL. + type: str env: - name: SLACK_WEBHOOK_URL ini: @@ -30,6 +31,7 @@ DOCUMENTATION = ''' channel: default: "#ansible" description: Slack room to post in. + type: str env: - name: SLACK_CHANNEL ini: @@ -37,6 +39,7 @@ DOCUMENTATION = ''' key: channel username: description: Username to post as. + type: str env: - name: SLACK_USERNAME default: ansible diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index a3e401bc21..b2ce48de25 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -22,6 +22,7 @@ DOCUMENTATION = ''' options: url: description: URL to the Splunk HTTP collector source. + type: str env: - name: SPLUNK_URL ini: @@ -29,6 +30,7 @@ DOCUMENTATION = ''' key: url authtoken: description: Token to authenticate the connection to the Splunk HTTP collector. + type: str env: - name: SPLUNK_AUTHTOKEN ini: diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 0304b9de52..32ca6e0ed0 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -20,6 +20,7 @@ requirements: options: url: description: URL to the Sumologic HTTP collector source. 
+ type: str env: - name: SUMOLOGIC_URL ini: diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 43d6ff2f9f..9066d8d9c5 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -19,6 +19,7 @@ DOCUMENTATION = ''' options: server: description: Syslog server that will receive the event. + type: str env: - name: SYSLOG_SERVER default: localhost @@ -27,6 +28,7 @@ DOCUMENTATION = ''' key: syslog_server port: description: Port on which the syslog server is listening. + type: int env: - name: SYSLOG_PORT default: 514 @@ -35,6 +37,7 @@ DOCUMENTATION = ''' key: syslog_port facility: description: Syslog facility to log as. + type: str env: - name: SYSLOG_FACILITY default: user From c0fd10e7934b8bd3dc6a962a651906be64f75300 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Jul 2024 17:18:41 +0200 Subject: [PATCH 169/482] Various docs improvements (#8664) Various docs improvements. --- plugins/modules/haproxy.py | 5 ++-- plugins/modules/ipa_dnsrecord.py | 47 +++++++++++++++---------------- plugins/modules/maven_artifact.py | 32 ++++++++++----------- plugins/modules/nagios.py | 2 -- plugins/modules/pkgng.py | 1 - 5 files changed, 40 insertions(+), 47 deletions(-) diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py index cbaa438334..320c77e7a1 100644 --- a/plugins/modules/haproxy.py +++ b/plugins/modules/haproxy.py @@ -65,9 +65,8 @@ options: state: description: - Desired state of the provided backend host. - - Note that V(drain) state was added in version 2.4. - - It is supported only by HAProxy version 1.5 or later, - - When used on versions < 1.5, it will be ignored. + - Note that V(drain) state is supported only by HAProxy version 1.5 or later. + When used on versions < 1.5, it will be ignored. 
type: str required: true choices: [ disabled, drain, enabled ] diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index 59475a55be..1dad138377 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -35,11 +35,8 @@ options: record_type: description: - The type of DNS record name. - - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX' and 'SSHFP' are supported. - - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - - "'SRV' and 'MX' are added in version 2.8." - - "'NS' are added in comunity.general 8.2.0." - - "'SSHFP' are added in community.general 9.1.0." + - Support for V(NS) was added in comunity.general 8.2.0. + - Support for V(SSHFP) was added in community.general 9.1.0. required: false default: 'A' choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP'] @@ -49,31 +46,31 @@ options: - Manage DNS record name with this value. - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. - Use O(record_values) if you need to specify multiple values. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. - - In the case of 'SSHFP' record type, this will be an SSH fingerprint record. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. 
+ - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. + - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: str record_values: description: - Manage DNS record name with this value. - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. - - In the case of 'SSHFP' record type, this will be an SSH fingerprint record. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. + - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. 
+ - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: list elements: str record_ttl: diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py index 0dc020c37a..e239b4a164 100644 --- a/plugins/modules/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -11,7 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository @@ -22,7 +22,7 @@ description: author: "Chris Schmidt (@chrisisbeef)" requirements: - lxml - - boto if using a S3 repository (s3://...) + - boto if using a S3 repository (V(s3://...)) attributes: check_mode: support: none @@ -32,52 +32,52 @@ options: group_id: type: str description: - - The Maven groupId coordinate + - The Maven groupId coordinate. required: true artifact_id: type: str description: - - The maven artifactId coordinate + - The maven artifactId coordinate. required: true version: type: str description: - - The maven version coordinate + - The maven version coordinate. - Mutually exclusive with O(version_by_spec). version_by_spec: type: str description: - The maven dependency version ranges. - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution) - - The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported. + - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported. - Mutually exclusive with O(version). 
version_added: '0.2.0' classifier: type: str description: - - The maven classifier coordinate + - The maven classifier coordinate. default: '' extension: type: str description: - - The maven type/extension coordinate + - The maven type/extension coordinate. default: jar repository_url: type: str description: - The URL of the Maven Repository to download from. - - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. - - Use file://... if the repository is local, added in version 2.6 + - Use V(s3://...) if the repository is hosted on Amazon S3. + - Use V(file://...) if the repository is local. default: https://repo1.maven.org/maven2 username: type: str description: - - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 + - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3. aliases: [ "aws_secret_key" ] password: type: str description: - - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 + - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3. aliases: [ "aws_secret_access_key" ] headers: description: @@ -95,19 +95,19 @@ options: dest: type: path description: - - The path where the artifact should be written to - - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file + - The path where the artifact should be written to. + - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file. required: true state: type: str description: - - The desired state of the artifact + - The desired state of the artifact. 
default: present choices: [present,absent] timeout: type: int description: - - Specifies a timeout in seconds for the connection attempt + - Specifies a timeout in seconds for the connection attempt. default: 10 validate_certs: description: diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py index 783aa88e24..0f1f0b7c50 100644 --- a/plugins/modules/nagios.py +++ b/plugins/modules/nagios.py @@ -39,8 +39,6 @@ options: action: description: - Action to take. - - servicegroup options were added in 2.0. - - delete_downtime options were added in 2.2. - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. required: true choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py index 88c9b8e3b9..e283311c8e 100644 --- a/plugins/modules/pkgng.py +++ b/plugins/modules/pkgng.py @@ -127,7 +127,6 @@ EXAMPLES = ''' - bar state: absent -# "latest" support added in 2.7 - name: Upgrade package baz community.general.pkgng: name: baz From e1148e6bdcaa0bf5c0135197fb774af9f7f06cb4 Mon Sep 17 00:00:00 2001 From: JL Euler Date: Tue, 23 Jul 2024 17:58:54 +0200 Subject: [PATCH 170/482] Fix new Proxmox Volume handling (#8646) * proxmox(fix): volume string builder Half of the string was incorrectly discarded * proxmox(fix): remove string conversion of values - Also converted `None` values into strings - Clashed with non-`str` values in documentation * proxmox: add changelog fragment * proxmox(fix): remove old & unused imports * proxmox(fix): correctly turn maps into lists * Update changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml Co-authored-by: Felix Fontein * Update plugins/modules/proxmox.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8646-fix-bug-in-proxmox-volumes.yml | 4 ++++ plugins/modules/proxmox.py | 14 ++++---------- 2 files changed, 8 insertions(+), 10 deletions(-) create mode 
100644 changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml diff --git a/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml b/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml new file mode 100644 index 0000000000..b3b03a008b --- /dev/null +++ b/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml @@ -0,0 +1,4 @@ +bugfixes: + - proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). + - proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). + - proxmox - fixed an issue where volume strings where overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 67a67aec55..104a896362 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -590,8 +590,7 @@ import time from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import string_types -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.proxmox import ( @@ -727,10 +726,11 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): # 1.3 If we have a host_path, we don't have storage, a volume, or a size vol_string = ",".join( + [vol_string] + ([] if host_path is None else [host_path]) + ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) + - ([] if options is None else [map("=".join, options.items())]) + - ([] if not kwargs else [map("=".join, kwargs.items())]) + ([] if options is None else 
["{0}={1}".format(k, v) for k, v in options.items()]) + + ([] if not kwargs else ["{0}={1}".format(k, v) for k, v in kwargs.items()]) ) return {key: vol_string} @@ -759,9 +759,6 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): if disk is not None: kwargs["disk_volume"] = parse_disk_string(disk) if "disk_volume" in kwargs: - if not all(isinstance(val, string_types) for val in kwargs["disk_volume"].values()): - self.module.warn("All disk_volume values must be strings. Converting non-string values to strings.") - kwargs["disk_volume"] = {key: to_text(val) for key, val in kwargs["disk_volume"].items()} disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume")) kwargs.update(disk_dict) if memory is not None: @@ -775,9 +772,6 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): if "mount_volumes" in kwargs: mounts_list = kwargs.pop("mount_volumes") for mount_config in mounts_list: - if not all(isinstance(val, string_types) for val in mount_config.values()): - self.module.warn("All mount_volumes values must be strings. Converting non-string values to strings.") - mount_config = {key: to_text(val) for key, val in mount_config.items()} key = mount_config.pop("id") mount_dict = build_volume(key=key, **mount_config) kwargs.update(mount_dict) From e3fb817a217a11827e72f1229fefefa73411480c Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Tue, 23 Jul 2024 17:59:25 +0200 Subject: [PATCH 171/482] pkgng - add option use_globs (default=true) (#8633) * pkgng - add option use_globs (default=true) #8632 * Fix lint. 
* Update changelogs/fragments/8632-pkgng-add-option-use_globs.yml Co-authored-by: Felix Fontein * Update plugins/modules/pkgng.py Co-authored-by: Felix Fontein * Update plugins/modules/pkgng.py Co-authored-by: Felix Fontein * Update tests/integration/targets/pkgng/tasks/install_single_package.yml Co-authored-by: Felix Fontein * Update plugins/modules/pkgng.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8632-pkgng-add-option-use_globs.yml | 2 ++ plugins/modules/pkgng.py | 28 +++++++++++++++---- .../pkgng/tasks/install_single_package.yml | 11 ++++++++ 3 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/8632-pkgng-add-option-use_globs.yml diff --git a/changelogs/fragments/8632-pkgng-add-option-use_globs.yml b/changelogs/fragments/8632-pkgng-add-option-use_globs.yml new file mode 100644 index 0000000000..d3e03959d5 --- /dev/null +++ b/changelogs/fragments/8632-pkgng-add-option-use_globs.yml @@ -0,0 +1,2 @@ +minor_changes: + - pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633). diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py index e283311c8e..7a04ee3a6e 100644 --- a/plugins/modules/pkgng.py +++ b/plugins/modules/pkgng.py @@ -100,6 +100,13 @@ options: type: bool default: false version_added: 1.3.0 + use_globs: + description: + - Treat the package names as shell glob patterns. + required: false + type: bool + default: true + version_added: 9.3.0 author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. 
@@ -136,6 +143,12 @@ EXAMPLES = ''' community.general.pkgng: name: "*" state: latest + +- name: Upgrade foo/bar + community.general.pkgng: + name: foo/bar + state: latest + use_globs: false ''' @@ -146,7 +159,7 @@ from ansible.module_utils.basic import AnsibleModule def query_package(module, run_pkgng, name): - rc, out, err = run_pkgng('info', '-g', '-e', name) + rc, out, err = run_pkgng('info', '-e', name) return rc == 0 @@ -156,7 +169,7 @@ def query_update(module, run_pkgng, name): # Check to see if a package upgrade is available. # rc = 0, no updates available or package not installed # rc = 1, updates available - rc, out, err = run_pkgng('upgrade', '-g', '-n', name) + rc, out, err = run_pkgng('upgrade', '-n', name) return rc == 1 @@ -259,7 +272,7 @@ def install_packages(module, run_pkgng, packages, cached, state): action_count[action] += len(package_list) continue - pkgng_args = [action, '-g', '-U', '-y'] + package_list + pkgng_args = [action, '-U', '-y'] + package_list rc, out, err = run_pkgng(*pkgng_args) stdout += out stderr += err @@ -289,7 +302,7 @@ def install_packages(module, run_pkgng, packages, cached, state): def annotation_query(module, run_pkgng, package, tag): - rc, out, err = run_pkgng('info', '-g', '-A', package) + rc, out, err = run_pkgng('info', '-A', package) match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) if match: return match.group('value') @@ -424,7 +437,9 @@ def main(): rootdir=dict(required=False, type='path'), chroot=dict(required=False, type='path'), jail=dict(required=False, type='str'), - autoremove=dict(default=False, type='bool')), + autoremove=dict(default=False, type='bool'), + use_globs=dict(default=True, required=False, type='bool'), + ), supports_check_mode=True, mutually_exclusive=[["rootdir", "chroot", "jail"]]) @@ -465,6 +480,9 @@ def main(): def run_pkgng(action, *args, **kwargs): cmd = [pkgng_path, dir_arg, action] + if p["use_globs"] and action in ('info', 'install', 'upgrade',): + args = 
('-g',) + args + pkgng_env = {'BATCH': 'yes'} if p["ignore_osver"]: diff --git a/tests/integration/targets/pkgng/tasks/install_single_package.yml b/tests/integration/targets/pkgng/tasks/install_single_package.yml index 5ba529af35..7f0886af8b 100644 --- a/tests/integration/targets/pkgng/tasks/install_single_package.yml +++ b/tests/integration/targets/pkgng/tasks/install_single_package.yml @@ -40,6 +40,16 @@ get_mime: false register: pkgng_install_stat_after +- name: Upgrade package (orig, no globs) + pkgng: + name: '{{ pkgng_test_pkg_category }}/{{ pkgng_test_pkg_name }}' + state: latest + use_globs: false + jail: '{{ pkgng_test_jail | default(omit) }}' + chroot: '{{ pkgng_test_chroot | default(omit) }}' + rootdir: '{{ pkgng_test_rootdir | default(omit) }}' + register: pkgng_upgrade_orig_noglobs + - name: Remove test package (if requested) pkgng: <<: *pkgng_install_params @@ -56,3 +66,4 @@ - not pkgng_install_idempotent_cached.stdout is match("Updating \w+ repository catalogue\.\.\.") - pkgng_install_stat_after.stat.exists - pkgng_install_stat_after.stat.executable + - pkgng_upgrade_orig_noglobs is not changed From 58f9860ba73d5caae8d74081156ab96b12e64086 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 24 Jul 2024 04:00:44 +1200 Subject: [PATCH 172/482] Fix pipx tests (#8665) * fix pipx tests * enable pipx int tests * replace ansible-lint with pylint in pipx test * install jupyter in freebsd * replace jupyter with mkdocs in pipx test * adjust installed dependency for mkdocs * fix pipx_info tests as well --- tests/integration/targets/pipx/aliases | 1 - tests/integration/targets/pipx/tasks/main.yml | 60 +++++++++---------- tests/integration/targets/pipx_info/aliases | 1 - .../targets/pipx_info/tasks/main.yml | 14 ++--- 4 files changed, 37 insertions(+), 39 deletions(-) diff --git a/tests/integration/targets/pipx/aliases b/tests/integration/targets/pipx/aliases index 66e6e1a3e6..9f87ec3480 100644 --- 
a/tests/integration/targets/pipx/aliases +++ b/tests/integration/targets/pipx/aliases @@ -6,4 +6,3 @@ azp/posix/2 destructive skip/python2 skip/python3.5 -disabled # TODO diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index 7eb0f11a6c..ad5e14104b 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -217,76 +217,76 @@ - "'tox' not in uninstall_tox_again.application" ############################################################################## -- name: ensure application ansible-lint is uninstalled +- name: ensure application pylint is uninstalled community.general.pipx: - name: ansible-lint + name: pylint state: absent -- name: install application ansible-lint +- name: install application pylint community.general.pipx: - name: ansible-lint - register: install_ansible_lint + name: pylint + register: install_pylint - name: inject packages community.general.pipx: state: inject - name: ansible-lint + name: pylint inject_packages: - licenses - register: inject_pkgs_ansible_lint + register: inject_pkgs_pylint - name: inject packages with apps community.general.pipx: state: inject - name: ansible-lint + name: pylint inject_packages: - black install_apps: true - register: inject_pkgs_apps_ansible_lint + register: inject_pkgs_apps_pylint -- name: cleanup ansible-lint +- name: cleanup pylint community.general.pipx: state: absent - name: ansible-lint - register: uninstall_ansible_lint + name: pylint + register: uninstall_pylint - name: check assertions inject_packages assert: that: - - install_ansible_lint is changed - - inject_pkgs_ansible_lint is changed - - '"ansible-lint" in inject_pkgs_ansible_lint.application' - - '"licenses" in inject_pkgs_ansible_lint.application["ansible-lint"]["injected"]' - - inject_pkgs_apps_ansible_lint is changed - - '"ansible-lint" in inject_pkgs_apps_ansible_lint.application' - - '"black" in 
inject_pkgs_apps_ansible_lint.application["ansible-lint"]["injected"]' - - uninstall_ansible_lint is changed + - install_pylint is changed + - inject_pkgs_pylint is changed + - '"pylint" in inject_pkgs_pylint.application' + - '"licenses" in inject_pkgs_pylint.application["pylint"]["injected"]' + - inject_pkgs_apps_pylint is changed + - '"pylint" in inject_pkgs_apps_pylint.application' + - '"black" in inject_pkgs_apps_pylint.application["pylint"]["injected"]' + - uninstall_pylint is changed ############################################################################## - name: install jupyter - not working smoothly in freebsd - when: ansible_system != 'FreeBSD' + # when: ansible_system != 'FreeBSD' block: - - name: ensure application jupyter is uninstalled + - name: ensure application mkdocs is uninstalled community.general.pipx: - name: jupyter + name: mkdocs state: absent - - name: install application jupyter + - name: install application mkdocs community.general.pipx: - name: jupyter + name: mkdocs install_deps: true - register: install_jupyter + register: install_mkdocs - - name: cleanup jupyter + - name: cleanup mkdocs community.general.pipx: state: absent - name: jupyter + name: mkdocs - name: check assertions assert: that: - - install_jupyter is changed - - '"ipython" in install_jupyter.stdout' + - install_mkdocs is changed + - '"markdown_py" in install_mkdocs.stdout' ############################################################################## - name: ensure /opt/pipx diff --git a/tests/integration/targets/pipx_info/aliases b/tests/integration/targets/pipx_info/aliases index e262b485a6..a28278bbc1 100644 --- a/tests/integration/targets/pipx_info/aliases +++ b/tests/integration/targets/pipx_info/aliases @@ -6,4 +6,3 @@ azp/posix/3 destructive skip/python2 skip/python3.5 -disabled # TODO diff --git a/tests/integration/targets/pipx_info/tasks/main.yml b/tests/integration/targets/pipx_info/tasks/main.yml index 0a01f0af9c..e3de105d6f 100644 --- 
a/tests/integration/targets/pipx_info/tasks/main.yml +++ b/tests/integration/targets/pipx_info/tasks/main.yml @@ -68,7 +68,7 @@ apps: - name: tox source: tox==3.24.0 - - name: ansible-lint + - name: pylint inject_packages: - licenses @@ -81,7 +81,7 @@ - name: install applications community.general.pipx: name: "{{ item.name }}" - source: "{{ item.source|default(omit) }}" + source: "{{ item.source | default(omit) }}" loop: "{{ apps }}" - name: inject packages @@ -102,9 +102,9 @@ include_injected: true register: info2_all_deps -- name: retrieve application ansible-lint +- name: retrieve application pylint community.general.pipx_info: - name: ansible-lint + name: pylint include_deps: true include_injected: true register: info2_lint @@ -131,10 +131,10 @@ - "'injected' in all_apps_deps[0]" - "'licenses' in all_apps_deps[0].injected" - - lint|length == 1 + - lint | length == 1 - all_apps_deps|length == 2 - lint[0] == all_apps_deps[0] vars: all_apps: "{{ info2_all.application|sort(attribute='name') }}" - all_apps_deps: "{{ info2_all_deps.application|sort(attribute='name') }}" - lint: "{{ info2_lint.application|sort(attribute='name') }}" + all_apps_deps: "{{ info2_all_deps.application | sort(attribute='name') }}" + lint: "{{ info2_lint.application | sort(attribute='name') }}" From 52126b8fae99d79591cb8994bdcf010f126b8771 Mon Sep 17 00:00:00 2001 From: Matthieu Bourgain Date: Tue, 23 Jul 2024 18:01:37 +0200 Subject: [PATCH 173/482] Add TLS certs params to redis (#8654) * add tls params to redis * add PR number * add example * move doc to redis fragment * Update changelogs/fragments/8654-add-redis-tls-params.yml Co-authored-by: Felix Fontein * rm aliases and add version_added --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8654-add-redis-tls-params.yml | 2 ++ plugins/doc_fragments/redis.py | 10 ++++++++++ plugins/module_utils/redis.py | 8 +++++++- plugins/modules/redis.py | 10 ++++++++++ tests/unit/plugins/modules/test_redis_info.py | 8 ++++++++ 5 files 
changed, 37 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8654-add-redis-tls-params.yml diff --git a/changelogs/fragments/8654-add-redis-tls-params.yml b/changelogs/fragments/8654-add-redis-tls-params.yml new file mode 100644 index 0000000000..0b549f5dd0 --- /dev/null +++ b/changelogs/fragments/8654-add-redis-tls-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py index fafb52c86c..69fd0c9cd5 100644 --- a/plugins/doc_fragments/redis.py +++ b/plugins/doc_fragments/redis.py @@ -49,6 +49,16 @@ options: - Path to root certificates file. If not set and O(tls) is set to V(true), certifi ca-certificates will be used. type: str + client_cert_file: + description: + - Path to the client certificate file. + type: str + version_added: 9.3.0 + client_key_file: + description: + - Path to the client private key file. 
+ type: str + version_added: 9.3.0 requirements: [ "redis", "certifi" ] notes: diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py index c4d87aca51..e823f966dc 100644 --- a/plugins/module_utils/redis.py +++ b/plugins/module_utils/redis.py @@ -57,7 +57,9 @@ def redis_auth_argument_spec(tls_default=True): validate_certs=dict(type='bool', default=True ), - ca_certs=dict(type='str') + ca_certs=dict(type='str'), + client_cert_file=dict(type='str'), + client_key_file=dict(type='str'), ) @@ -71,6 +73,8 @@ def redis_auth_params(module): ca_certs = module.params['ca_certs'] if tls and ca_certs is None: ca_certs = str(certifi.where()) + client_cert_file = module.params['client_cert_file'] + client_key_file = module.params['client_key_file'] if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: module.fail_json( msg='The option `username` in only supported with redis >= 3.4.0.') @@ -78,6 +82,8 @@ def redis_auth_params(module): 'port': login_port, 'password': login_password, 'ssl_ca_certs': ca_certs, + 'ssl_certfile': client_cert_file, + 'ssl_keyfile': client_key_file, 'ssl_cert_reqs': validate_certs, 'ssl': tls} if login_user is not None: diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py index 207927cb77..a30b89922c 100644 --- a/plugins/modules/redis.py +++ b/plugins/modules/redis.py @@ -132,6 +132,16 @@ EXAMPLES = ''' command: config name: lua-time-limit value: 100 + +- name: Connect using TLS and certificate authentication + community.general.redis: + command: config + name: lua-time-limit + value: 100 + tls: true + ca_certs: /etc/redis/certs/ca.crt + client_cert_file: /etc/redis/certs/redis.crt + client_key_file: /etc/redis/certs/redis.key ''' import traceback diff --git a/tests/unit/plugins/modules/test_redis_info.py b/tests/unit/plugins/modules/test_redis_info.py index cdc78680e5..831b8f4052 100644 --- a/tests/unit/plugins/modules/test_redis_info.py +++ b/tests/unit/plugins/modules/test_redis_info.py @@ 
-55,6 +55,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': None, 'ssl': False, 'ssl_ca_certs': None, + 'ssl_certfile': None, + 'ssl_keyfile': None, 'ssl_cert_reqs': 'required'},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') @@ -74,6 +76,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': 'PASS', 'ssl': False, 'ssl_ca_certs': None, + 'ssl_certfile': None, + 'ssl_keyfile': None, 'ssl_cert_reqs': 'required'},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') @@ -87,6 +91,8 @@ class TestRedisInfoModule(ModuleTestCase): 'login_password': 'PASS', 'tls': True, 'ca_certs': '/etc/ssl/ca.pem', + 'client_cert_file': '/etc/ssl/client.pem', + 'client_key_file': '/etc/ssl/client.key', 'validate_certs': False }) self.module.main() @@ -96,6 +102,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': 'PASS', 'ssl': True, 'ssl_ca_certs': '/etc/ssl/ca.pem', + 'ssl_certfile': '/etc/ssl/client.pem', + 'ssl_keyfile': '/etc/ssl/client.key', 'ssl_cert_reqs': None},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') From 1a8f172186e82126d922686c2226f34c1a2a5db1 Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Sat, 27 Jul 2024 02:37:14 -0500 Subject: [PATCH 174/482] Introduce bootc functionality (#8606) * introduce bootc functionality Signed-off-by: Ryan Cook Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * fix of test Signed-off-by: Ryan Cook * switch stdout var Signed-off-by: Ryan Cook * Feedback on NOTE format Co-authored-by: Felix Fontein * addition of trailing comma Co-authored-by: Felix Fontein * addition of trailing comma Co-authored-by: Felix Fontein * incorporating feedback from russoz Signed-off-by: Ryan Cook * error in stdout Signed-off-by: Ryan Cook * proper rc checking and status Signed-off-by: Ryan Cook * linting Signed-off-by: Ryan Cook * Update version Co-authored-by: Felix Fontein --------- Signed-off-by: Ryan Cook 
Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/bootc_manage.py | 95 +++++++++++++++++++ .../unit/plugins/modules/test_bootc_manage.py | 72 ++++++++++++++ 3 files changed, 169 insertions(+) create mode 100644 plugins/modules/bootc_manage.py create mode 100644 tests/unit/plugins/modules/test_bootc_manage.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 9eb521018f..6fabe92dee 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -448,6 +448,8 @@ files: maintainers: hkariti $modules/bitbucket_: maintainers: catcombo + $modules/bootc_manage.py: + maintainers: cooktheryan $modules/bower.py: maintainers: mwarkentin $modules/btrfs_: diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py new file mode 100644 index 0000000000..5628ffcca0 --- /dev/null +++ b/plugins/modules/bootc_manage.py @@ -0,0 +1,95 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Ryan Cook +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt +# or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: bootc_manage +version_added: 9.3.0 +author: +- Ryan Cook (@cooktheryan) +short_description: Bootc Switch and Upgrade +description: + - This module manages the switching and upgrading of C(bootc). +options: + state: + description: + - 'Control to apply the latest image or switch the image.' + - 'B(Note:) This will not reboot the system.' + - 'Please use M(ansible.builtin.reboot) to reboot the system.' + required: true + type: str + choices: ['switch', 'latest'] + image: + description: + - 'The image to switch to.' + - 'This is required when O(state=switch).' 
+ required: false + type: str + +''' + +EXAMPLES = ''' +# Switch to a different image +- name: Provide image to switch to a different image and retain the current running image + community.general.bootc_manage: + state: switch + image: "example.com/image:latest" + +# Apply updates of the current running image +- name: Apply updates of the current running image + community.general.bootc_manage: + state: latest +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['switch', 'latest']), + image=dict(type='str', required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'switch', ['image']), + ], + ) + + state = module.params['state'] + image = module.params['image'] + + if state == 'switch': + command = ['bootc', 'switch', image, '--retain'] + elif state == 'latest': + command = ['bootc', 'upgrade'] + + locale = get_best_parsable_locale(module) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + rc, stdout, err = module.run_command(command, check_rc=True) + + if 'Queued for next boot: ' in stdout: + result = {'changed': True, 'stdout': stdout} + module.exit_json(**result) + elif 'No changes in ' in stdout or 'Image specification is unchanged.' 
in stdout: + result = {'changed': False, 'stdout': stdout} + module.exit_json(**result) + else: + result = {'changed': False, 'stderr': err} + module.fail_json(msg='ERROR: Command execution failed.', **result) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_bootc_manage.py b/tests/unit/plugins/modules/test_bootc_manage.py new file mode 100644 index 0000000000..5393a57a07 --- /dev/null +++ b/tests/unit/plugins/modules/test_bootc_manage.py @@ -0,0 +1,72 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.plugins.modules import bootc_manage +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + + +class TestBootcManageModule(ModuleTestCase): + + def setUp(self): + super(TestBootcManageModule, self).setUp() + self.module = bootc_manage + + def tearDown(self): + super(TestBootcManageModule, self).tearDown() + + def test_switch_without_image(self): + """Failure if state is 'switch' but no image provided""" + set_module_args({'state': 'switch'}) + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], "state is switch but all of the following are missing: image") + + def test_switch_with_image(self): + """Test successful switch with image provided""" + set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with 
self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['changed']) + + def test_latest_state(self): + """Test successful upgrade to the latest state""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['changed']) + + def test_latest_state_no_change(self): + """Test no change for latest state""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'No changes in ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertFalse(result.exception.args[0]['changed']) + + def test_switch_image_failure(self): + """Test failure during image switch""" + set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') + + def test_latest_state_failure(self): + """Test failure during upgrade""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') From 37c8560542f4dd52fe695d23ebc043e52c6a6b8c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 28 Jul 2024 13:09:46 +0200 Subject: [PATCH 175/482] Type options of cache plugins 
(#8624) * Type options of cache plugins. * Do not change type of _timeout for now. --- changelogs/fragments/8624-cache-types.yml | 2 ++ plugins/cache/memcached.py | 4 +++- plugins/cache/pickle.py | 3 +++ plugins/cache/redis.py | 7 ++++++- plugins/cache/yaml.py | 3 +++ 5 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8624-cache-types.yml diff --git a/changelogs/fragments/8624-cache-types.yml b/changelogs/fragments/8624-cache-types.yml new file mode 100644 index 0000000000..8efa34b6c0 --- /dev/null +++ b/changelogs/fragments/8624-cache-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - "memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624)." diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 0bc5256b3f..93131172c5 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -29,6 +29,7 @@ DOCUMENTATION = ''' section: defaults _prefix: description: User defined prefix to use when creating the DB entries + type: string default: ansible_facts env: - name: ANSIBLE_CACHE_PLUGIN_PREFIX @@ -37,13 +38,14 @@ DOCUMENTATION = ''' section: defaults _timeout: default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float description: Expiration timeout in seconds for the cache plugin data. 
Set to 0 to never expire env: - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT ini: - key: fact_caching_timeout section: defaults - type: integer ''' import collections diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 06b673921e..aeffa68939 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -24,6 +24,7 @@ DOCUMENTATION = ''' ini: - key: fact_caching_connection section: defaults + type: path _prefix: description: User defined prefix to use when creating the files env: @@ -31,6 +32,7 @@ DOCUMENTATION = ''' ini: - key: fact_caching_prefix section: defaults + type: string _timeout: default: 86400 description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire @@ -39,6 +41,7 @@ DOCUMENTATION = ''' ini: - key: fact_caching_timeout section: defaults + type: float ''' try: diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index c43b1dbb5e..e01083e863 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. + type: string required: true env: - name: ANSIBLE_CACHE_PLUGIN_CONNECTION @@ -29,6 +30,7 @@ DOCUMENTATION = ''' section: defaults _prefix: description: User defined prefix to use when creating the DB entries + type: string default: ansible_facts env: - name: ANSIBLE_CACHE_PLUGIN_PREFIX @@ -37,6 +39,7 @@ DOCUMENTATION = ''' section: defaults _keyset_name: description: User defined name for cache keyset name. 
+ type: string default: ansible_cache_keys env: - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME @@ -46,6 +49,7 @@ DOCUMENTATION = ''' version_added: 1.3.0 _sentinel_service_name: description: The redis sentinel service name (or referenced as cluster name). + type: string env: - name: ANSIBLE_CACHE_REDIS_SENTINEL ini: @@ -54,13 +58,14 @@ DOCUMENTATION = ''' version_added: 1.3.0 _timeout: default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire env: - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT ini: - key: fact_caching_timeout section: defaults - type: integer ''' import re diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index 3a5ddf3e6f..a3d6f34521 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -24,6 +24,7 @@ DOCUMENTATION = ''' ini: - key: fact_caching_connection section: defaults + type: string _prefix: description: User defined prefix to use when creating the files env: @@ -31,6 +32,7 @@ DOCUMENTATION = ''' ini: - key: fact_caching_prefix section: defaults + type: string _timeout: default: 86400 description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire @@ -40,6 +42,7 @@ DOCUMENTATION = ''' - key: fact_caching_timeout section: defaults type: integer + # TODO: determine whether it is OK to change to: type: float ''' From 229ed6dad934218861308e650c3c2c8d4c7413a4 Mon Sep 17 00:00:00 2001 From: Thomas Bach <63091663+thomasbach-dev@users.noreply.github.com> Date: Thu, 1 Aug 2024 17:10:11 +0200 Subject: [PATCH 176/482] Add a keycloak module to query keys metadata (#8605) * feat(keycloak): module to query keys metadata * chore: add thomasbach-dev as maintainer in team_keycloak * test: adding a unit test for keycloak_real_keys_metadata_info module * fixup! 
feat(keycloak): module to query keys metadata --- .github/BOTMETA.yml | 2 +- .../identity/keycloak/keycloak.py | 32 +++ .../keycloak_realm_keys_metadata_info.py | 133 +++++++++++++ .../test_keycloak_realm_keys_metadata_info.py | 183 ++++++++++++++++++ 4 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/keycloak_realm_keys_metadata_info.py create mode 100644 tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6fabe92dee..65942ba740 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1528,7 +1528,7 @@ macros: team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 team_ipa: Akasurde Nosmoht justchris1 team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt mattock + team_keycloak: eikef ndclt mattock thomasbach-dev team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index b2a1892503..020b185a30 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -19,6 +19,7 @@ from ansible.module_utils.common.text.converters import to_native, to_text URL_REALM_INFO = "{url}/realms/{realm}" URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" +URL_REALM_KEYS_METADATA = "{url}/admin/realms/{realm}/keys" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" @@ -306,6 +307,37 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) + def 
get_realm_keys_metadata_by_id(self, realm='master'): + """Obtain realm public info by id + + :param realm: realm id + + :return: None, or a 'KeysMetadataRepresentation' + (https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation) + -- a dict containing the keys 'active' and 'keys', the former containing a mapping + from algorithms to key-ids, the latter containing a list of dicts with key + information. + """ + realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm) + + try: + return json.loads(to_native(open_url(realm_keys_metadata_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except Exception as e: + self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py new file mode 100644 index 0000000000..ef4048b891 --- /dev/null +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: 
keycloak_realm_keys_metadata_info + +short_description: Allows obtaining Keycloak realm keys metadata via Keycloak API + +version_added: 9.3.0 + +description: + - This module allows you to get Keycloak realm keys metadata via the Keycloak REST API. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). + +options: + realm: + type: str + description: + - They Keycloak realm to fetch keys metadata. + default: 'master' + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Thomas Bach (@thomasbach-dev) +""" + +EXAMPLES = """ +- name: Fetch Keys metadata + community.general.keycloak_realm_keys_metadata_info: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + delegate_to: localhost + register: keycloak_keys_metadata + +- name: Write the Keycloak keys certificate into a file + ansible.builtin.copy: + dest: /tmp/keycloak.cert + content: | + {{ keys_metadata['keycloak_keys_metadata']['keys'] + | selectattr('algorithm', 'equalto', 'RS256') + | map(attribute='certificate') + | first + }} + delegate_to: localhost +""" + +RETURN = """ +msg: + description: Message as to what action was taken. + returned: always + type: str + +keys_metadata: + description: + + - Representation of the realm keys metadata (see + U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)). + + returned: always + type: dict + contains: + active: + description: A mapping (that is, a dict) from key algorithms to UUIDs. + type: dict + returned: always + keys: + description: A list of dicts providing detailed information on the keys. 
+ type: list + elements: dict + returned: always +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec) + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default="master"), + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([["token", "auth_realm", "auth_username", "auth_password"]]), + required_together=([["auth_realm", "auth_username", "auth_password"]]), + ) + + result = dict(changed=False, msg="", keys_metadata="") + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get("realm") + + keys_metadata = kc.get_realm_keys_metadata_by_id(realm=realm) + + result["keys_metadata"] = keys_metadata + result["msg"] = "Get realm keys metadata successful for ID {realm}".format( + realm=realm + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py new file mode 100644 index 0000000000..14d36f6aab --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager +from itertools import count + +from ansible.module_utils.six import StringIO 
+from ansible_collections.community.general.plugins.modules import \ + keycloak_realm_keys_metadata_info +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, ModuleTestCase, set_module_args) + + +@contextmanager +def patch_keycloak_api(side_effect): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_realm_keys_metadata_info.KeycloakAPI + with patch.object(obj, "get_realm_keys_metadata_by_id", side_effect=side_effect) as obj_mocked: + yield obj_mocked + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count + ) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count + ) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs["method"] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. 
+ Without this function, the StringIO is empty for the second call. + """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } + return patch( + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", + side_effect=build_mocked_request(count(), token_response), + autospec=True, + ) + + +class TestKeycloakRealmRole(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmRole, self).setUp() + self.module = keycloak_realm_keys_metadata_info + + def test_get_public_info(self): + """Get realm public info""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "token": "{{ access_token }}", + "realm": "my-realm", + } + return_value = [ + { + "active": { + "AES": "aba3778d-d69d-4240-a578-a30720dbd3ca", + "HS512": "6e4fe29d-a7e4-472b-a348-298d8ae45dcc", + "RS256": "jaON84xLYg2fsKiV4p3wZag_S8MTjAp-dkpb1kRqzEs", + "RSA-OAEP": "3i_GikMqBBxtqhWXwpucxMvwl55jYlhiNIvxDTgNAEk", + }, + "keys": [ + { + "algorithm": "HS512", + "kid": "6e4fe29d-a7e4-472b-a348-298d8ae45dcc", + "providerId": "225dbe0b-3fc4-4e0d-8479-90a0cbc8adf7", + "providerPriority": 100, + "status": "ACTIVE", + "type": "OCT", + "use": "SIG", + }, + { + "algorithm": "RS256", + "certificate": "MIIC…", + "kid": "jaON84xLYg2fsKiV4p3wZag_S8MTjAp-dkpb1kRqzEs", + "providerId": "98c1ebeb-c690-4c5c-8b32-81bebe264cda", + "providerPriority": 100, + "publicKey": "MIIB…", + "status": "ACTIVE", + "type": "RSA", + "use": "SIG", + "validTo": 2034748624000, + }, + { + "algorithm": "AES", + "kid": "aba3778d-d69d-4240-a578-a30720dbd3ca", + "providerId": "99c70057-9b8d-4177-a83c-de2d081139e8", + "providerPriority": 100, + "status": "ACTIVE", + "type": "OCT", + "use": "ENC", + }, + { + "algorithm": "RSA-OAEP", + "certificate": "MIIC…", + "kid": 
"3i_GikMqBBxtqhWXwpucxMvwl55jYlhiNIvxDTgNAEk", + "providerId": "ab3de3fb-a32d-4be8-8324-64aa48d14c36", + "providerPriority": 100, + "publicKey": "MIIB…", + "status": "ACTIVE", + "type": "RSA", + "use": "ENC", + "validTo": 2034748625000, + }, + ], + } + ] + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(side_effect=return_value) as ( + mock_get_realm_keys_metadata_by_id + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + result = exec_info.exception.args[0] + self.assertIs(result["changed"], False) + self.assertEqual( + result["msg"], "Get realm keys metadata successful for ID my-realm" + ) + self.assertEqual(result["keys_metadata"], return_value[0]) + + self.assertEqual(len(mock_get_realm_keys_metadata_by_id.mock_calls), 1) + + +if __name__ == "__main__": + unittest.main() From 2963004991dcbd8d979ef69a146c5bbbb70c52e6 Mon Sep 17 00:00:00 2001 From: Kit Ham Date: Fri, 2 Aug 2024 03:11:23 +1200 Subject: [PATCH 177/482] homebrew: Add support for services functions (#8329) * Homebrew: Add support for services functions Fixes #8286. Add a homebrew.services module for starting and stopping services that are attached to homebrew packages. 
* Address python version compatibility * Addressing reviewer comments * Addressing sanity logs * Address str format issues * Fixing Python 2.7 syntax issues * Test alias, BOTMETA, grammar * Attempt to fix brew in tests * Address comments by russoz * Fixing more dumb typos * Actually uninstall black * Update version_added in plugins/modules/homebrew_services.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 5 + plugins/module_utils/homebrew.py | 27 ++ plugins/modules/homebrew_services.py | 256 ++++++++++++++++++ .../targets/homebrew_services/aliases | 9 + .../homebrew_services/handlers/main.yml | 11 + .../targets/homebrew_services/tasks/main.yml | 86 ++++++ 6 files changed, 394 insertions(+) create mode 100644 plugins/modules/homebrew_services.py create mode 100644 tests/integration/targets/homebrew_services/aliases create mode 100644 tests/integration/targets/homebrew_services/handlers/main.yml create mode 100644 tests/integration/targets/homebrew_services/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 65942ba740..f73da1e874 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -648,6 +648,11 @@ files: labels: homebrew_ macos maintainers: $team_macos notify: chris-short + $modules/homebrew_services.py: + ignore: ryansb + keywords: brew cask services darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos kitizz $modules/homectl.py: maintainers: jameslivulpi $modules/honeybadger_deployment.py: diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py index 2816832109..4b5c4672e4 100644 --- a/plugins/module_utils/homebrew.py +++ b/plugins/module_utils/homebrew.py @@ -113,3 +113,30 @@ class HomebrewValidate(object): return isinstance( package, string_types ) and not cls.INVALID_PACKAGE_REGEX.search(package) + + +def parse_brew_path(module): + # type: (...) -> str + """Attempt to find the Homebrew executable path. 
+ + Requires: + - module has a `path` parameter + - path is a valid path string for the target OS. Otherwise, module.fail_json() + is called with msg="Invalid_path: ". + """ + path = module.params["path"] + if not HomebrewValidate.valid_path(path): + module.fail_json(msg="Invalid path: {0}".format(path)) + + if isinstance(path, string_types): + paths = path.split(":") + elif isinstance(path, list): + paths = path + else: + module.fail_json(msg="Invalid path: {0}".format(path)) + + brew_path = module.get_bin_path("brew", required=True, opt_dirs=paths) + if not HomebrewValidate.valid_brew_path(brew_path): + module.fail_json(msg="Invalid brew path: {0}".format(brew_path)) + + return brew_path diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py new file mode 100644 index 0000000000..2794025b29 --- /dev/null +++ b/plugins/modules/homebrew_services.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2024, Kit Ham +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: homebrew_services +author: + - "Kit Ham (@kitizz)" +requirements: + - homebrew must already be installed on the target system +short_description: Services manager for Homebrew +version_added: 9.3.0 +description: + - Manages daemons and services via Homebrew. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An installed homebrew package whose service is to be updated. 
+ aliases: [ 'formula' ] + type: str + required: true + path: + description: + - "A V(:) separated list of paths to search for C(brew) executable. + Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of C(brew) command, + providing an alternative C(brew) path enables managing different set of packages in an alternative location in the system." + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package's service. + choices: [ 'present', 'absent', 'restarted' ] + default: present + type: str +""" + +EXAMPLES = """ +- name: Install foo package + community.general.homebrew: + name: foo + state: present + +- name: Start the foo service (equivalent to `brew services start foo`) + community.general.homebrew_service: + name: foo + state: present + +- name: Restart the foo service (equivalent to `brew services restart foo`) + community.general.homebrew_service: + name: foo + state: restarted + +- name: Remove the foo service (equivalent to `brew services stop foo`) + community.general.homebrew_service: + name: foo + service_state: absent +""" + +RETURN = """ +pid: + description: + - If the service is now running, this is the PID of the service, otherwise -1. + returned: success + type: int + sample: 1234 +running: + description: + - Whether the service is running after running this command. + returned: success + type: bool + sample: true +""" + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, + parse_brew_path, +) + +if sys.version_info < (3, 5): + from collections import namedtuple + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. 
+ HomebrewServiceArgs = namedtuple( + "HomebrewServiceArgs", ["name", "state", "brew_path"] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) + +else: + from typing import NamedTuple, Optional + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = NamedTuple( + "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = NamedTuple( + "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] + ) + + +def _brew_service_state(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> HomebrewServiceState + cmd = [args.brew_path, "services", "info", args.name, "--json"] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + + try: + data = json.loads(stdout)[0] + except json.JSONDecodeError: + module.fail_json(msg="Failed to parse JSON output:\n{0}".format(stdout)) + + return HomebrewServiceState(running=data["status"] == "started", pid=data["pid"]) + + +def _exit_with_state(args, module, changed=False, message=None): + # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None + state = _brew_service_state(args, module) + if message is None: + message = ( + "Running: {state.running}, Changed: {changed}, PID: {state.pid}".format( + state=state, changed=changed + ) + ) + module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) + + +def validate_and_load_arguments(module): + # type: (AnsibleModule) -> HomebrewServiceArgs + """Reuse the Homebrew module's validation logic to validate these arguments.""" + package = module.params["name"] # type: ignore + if not HomebrewValidate.valid_package(package): + module.fail_json(msg="Invalid package name: {0}".format(package)) + + state = module.params["state"] # type: ignore + if state not in ["present", 
"absent", "restarted"]: + module.fail_json(msg="Invalid state: {0}".format(state)) + + brew_path = parse_brew_path(module) + + return HomebrewServiceArgs(name=package, state=state, brew_path=brew_path) + + +def start_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Start the requested brew service if it is not already running.""" + state = _brew_service_state(args, module) + if state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already running") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be started") + + start_cmd = [args.brew_path, "services", "start", args.name] + rc, stdout, stderr = module.run_command(start_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def stop_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Stop the requested brew service if it is running.""" + state = _brew_service_state(args, module) + if not state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already stopped") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be stopped") + + stop_cmd = [args.brew_path, "services", "stop", args.name] + rc, stdout, stderr = module.run_command(stop_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def restart_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Restart the requested brew service. 
This always results in a change.""" + if module.check_mode: + _exit_with_state( + args, module, changed=True, message="Service would be restarted" + ) + + restart_cmd = [args.brew_path, "services", "restart", args.name] + rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["formula"], + required=True, + type="str", + ), + state=dict( + choices=["present", "absent", "restarted"], + default="present", + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type="path", + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + # Pre-validate arguments. + service_args = validate_and_load_arguments(module) + + # Choose logic based on the desired state. + if service_args.state == "present": + start_service(service_args, module) + elif service_args.state == "absent": + stop_service(service_args, module) + elif service_args.state == "restarted": + restart_service(service_args, module) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/homebrew_services/aliases b/tests/integration/targets/homebrew_services/aliases new file mode 100644 index 0000000000..bd478505d9 --- /dev/null +++ b/tests/integration/targets/homebrew_services/aliases @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 +skip/aix +skip/freebsd +skip/rhel +skip/docker diff --git a/tests/integration/targets/homebrew_services/handlers/main.yml b/tests/integration/targets/homebrew_services/handlers/main.yml new file mode 100644 index 0000000000..18856120d0 --- /dev/null +++ 
b/tests/integration/targets/homebrew_services/handlers/main.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: uninstall black + community.general.homebrew: + name: black + state: absent + become: true + become_user: "{{ brew_stat.stat.pw_name }}" diff --git a/tests/integration/targets/homebrew_services/tasks/main.yml b/tests/integration/targets/homebrew_services/tasks/main.yml new file mode 100644 index 0000000000..1d524715ca --- /dev/null +++ b/tests/integration/targets/homebrew_services/tasks/main.yml @@ -0,0 +1,86 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Don't run this test for non-MacOS systems. +- meta: end_play + when: ansible_facts.distribution != 'MacOSX' + +- name: MACOS | Find brew binary + command: which brew + register: brew_which + +- name: MACOS | Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + +- name: Homebrew Services test block + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + block: + - name: MACOS | Install black + community.general.homebrew: + name: black + state: present + register: install_result + notify: + - uninstall black + + - name: Check the black service is installed + assert: + that: + - install_result is success + + - name: Start the black service + community.general.homebrew_services: + name: black + state: present + register: start_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is running + assert: + that: + - start_result is success + + - name: Start the black service when already started + community.general.homebrew_services: + name: black + state: present + register: start_result + environment: 
+ HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check for idempotency + assert: + that: + - start_result.changed == 0 + + - name: Restart the black service + community.general.homebrew_services: + name: black + state: restarted + register: restart_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is restarted + assert: + that: + - restart_result is success + + - name: Stop the black service + community.general.homebrew_services: + name: black + state: present + register: stop_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is stopped + assert: + that: + - stop_result is success From 7bbf32dc0e761bb57cf2a6fc5031f5e4b7240f7d Mon Sep 17 00:00:00 2001 From: inDane Date: Thu, 1 Aug 2024 17:11:52 +0200 Subject: [PATCH 178/482] Update proxmox.py (#8657) * Update proxmox.py Added an example to create a new container with more network options (with ipv6 static configuration) * Update proxmox.py Made the linter happy. * cleaned up dictionaries Changed dictionaries from this format: netif: '{"net0":"name=eth0,g... to this: netif: net0: "name=eth0,g... 
* Update proxmox.py false intendation and trailing whitespaces --- plugins/modules/proxmox.py | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 104a896362..775f1a6b52 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -401,7 +401,8 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}' + netif: + net0: "name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0" - name: Create new container with minimal options defining network interface with static ip community.general.proxmox: @@ -413,7 +414,21 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0" + +- name: Create new container with more options defining network interface with static ip4 and ip6 with vlan-tag and mtu + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,ip6=fe80::1227/64,gw6=fe80::1,bridge=vmbr0,firewall=1,tag=934,mtu=1500" - name: Create new container with minimal options defining a mount with 8GB community.general.proxmox: @@ -425,7 +440,8 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mounts: '{"mp0":"local:8,mp=/mnt/test/"}' + mounts: + mp0: "local:8,mp=/mnt/test/" - name: Create new container with minimal options defining a mount with 8GB using mount_volumes community.general.proxmox: @@ -511,7 +527,8 @@ EXAMPLES = r''' api_user: root@pam api_password: 1q2w3e api_host: 
node1 - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0"}' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0" update: true - name: Start container From b6c6253bfc8bd34e3f45ece1d3cb5df57d965214 Mon Sep 17 00:00:00 2001 From: Andreas Perhab Date: Thu, 1 Aug 2024 17:16:24 +0200 Subject: [PATCH 179/482] fix(modules/gitlab_runners): pass paused to gitlab (#8648) --- changelogs/fragments/8648-fix-gitlab-runner-paused.yaml | 2 ++ plugins/modules/gitlab_runner.py | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8648-fix-gitlab-runner-paused.yaml diff --git a/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml b/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml new file mode 100644 index 0000000000..d064725f14 --- /dev/null +++ b/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml @@ -0,0 +1,2 @@ +bugfixes: + - "gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648)." 
\ No newline at end of file diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index 96b3eb3fa4..b11e029103 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -466,6 +466,7 @@ def main(): state = module.params['state'] runner_description = module.params['description'] runner_active = module.params['active'] + runner_paused = module.params['paused'] tag_list = module.params['tag_list'] run_untagged = module.params['run_untagged'] runner_locked = module.params['locked'] @@ -500,7 +501,7 @@ def main(): module.exit_json(changed=False, msg="Runner deleted or does not exists") if state == 'present': - if gitlab_runner.create_or_update_runner(runner_description, { + runner_values = { "active": runner_active, "tag_list": tag_list, "run_untagged": run_untagged, @@ -510,7 +511,11 @@ def main(): "registration_token": registration_token, "group": group, "project": project, - }): + } + if LooseVersion(gitlab_runner._gitlab.version()[0]) >= LooseVersion("14.8.0"): + # the paused attribute for runners is available since 14.8 + runner_values["paused"] = runner_paused + if gitlab_runner.create_or_update_runner(runner_description, runner_values): module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, msg="Successfully created or updated the runner %s" % runner_description) else: From fd811df414095c7c268e09218520dc9db03da1e8 Mon Sep 17 00:00:00 2001 From: Mateusz Kiersnowski <82416937+Ganji00@users.noreply.github.com> Date: Thu, 1 Aug 2024 17:25:02 +0200 Subject: [PATCH 180/482] Update timezone.py (#8692) in order to set a timezone, root priviliages are needed on most distros, therefore i suggest to change an example to make it plug and play ready. 
--- plugins/modules/timezone.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py index e027290e86..790795140b 100644 --- a/plugins/modules/timezone.py +++ b/plugins/modules/timezone.py @@ -75,6 +75,7 @@ diff: EXAMPLES = r''' - name: Set timezone to Asia/Tokyo + become: true community.general.timezone: name: Asia/Tokyo ''' From c517f1c483c546542b7cf365925dfd75cce4ff66 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 4 Aug 2024 10:09:12 +1200 Subject: [PATCH 181/482] ensure util-linux-extra is installed in Ubuntu 24.04 (#8710) --- plugins/modules/timezone.py | 3 ++- tests/integration/targets/timezone/tasks/main.yml | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py index 790795140b..cd823e6115 100644 --- a/plugins/modules/timezone.py +++ b/plugins/modules/timezone.py @@ -49,7 +49,8 @@ options: aliases: [ rtc ] choices: [ local, UTC ] notes: - - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone + - On Ubuntu 24.04 the C(util-linux-extra) package is required to provide the C(hwclock) command. + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone. - On AIX only Olson/tz database timezones are usable (POSIX is not supported). An OS reboot is also required on AIX for the new timezone setting to take effect. Note that AIX 6.1+ is needed (OS level 61 or newer). 
diff --git a/tests/integration/targets/timezone/tasks/main.yml b/tests/integration/targets/timezone/tasks/main.yml index 721341592a..475f22447d 100644 --- a/tests/integration/targets/timezone/tasks/main.yml +++ b/tests/integration/targets/timezone/tasks/main.yml @@ -60,6 +60,14 @@ state: present when: ansible_distribution == 'Alpine' +- name: make sure hwclock is installed in Ubuntu 24.04 + package: + name: util-linux-extra + state: present + when: + - ansible_distribution == 'Ubuntu' + - ansible_facts.distribution_major_version is version('24', '>=') + - name: make sure the dbus service is started under systemd systemd: name: dbus From 132faeae3455e3e57cca8af70c314a2b244f4087 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 6 Aug 2024 07:06:11 +1200 Subject: [PATCH 182/482] gconftool2: minor refactor (#8711) * gconftool2: minor refactor * add changelog frag --- changelogs/fragments/8711-gconftool2-refactor.yml | 2 ++ plugins/modules/gconftool2.py | 13 +++++-------- 2 files changed, 7 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8711-gconftool2-refactor.yml diff --git a/changelogs/fragments/8711-gconftool2-refactor.yml b/changelogs/fragments/8711-gconftool2-refactor.yml new file mode 100644 index 0000000000..ae214d95ec --- /dev/null +++ b/changelogs/fragments/8711-gconftool2-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711). 
diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index db7c6dc883..deae8a2f16 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -127,9 +127,8 @@ class GConftool(StateModuleHelper): def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) - if self.vars.state != "get": - if not self.vars.direct and self.vars.config_source is not None: - self.module.fail_json(msg='If the "config_source" is specified then "direct" must be "true"') + if not self.vars.direct and self.vars.config_source is not None: + self.do_raise('If the "config_source" is specified then "direct" must be "true"') self.vars.set('previous_value', self._get(), fact=True) self.vars.set('value_type', self.vars.value_type) @@ -140,7 +139,7 @@ class GConftool(StateModuleHelper): def _make_process(self, fail_on_err): def process(rc, out, err): if err and fail_on_err: - self.ansible.fail_json(msg='gconftool-2 failed with error: %s' % (str(err))) + self.do_raise('gconftool-2 failed with error:\n%s' % err.strip()) out = out.rstrip() self.vars.value = None if out == "" else out return self.vars.value @@ -152,16 +151,14 @@ class GConftool(StateModuleHelper): def state_absent(self): with self.runner("state key", output_process=self._make_process(False)) as ctx: ctx.run() - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.set('new_value', None, fact=True) self.vars._value = None def state_present(self): with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: ctx.run() - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.set('new_value', self._get(), fact=True) self.vars._value = self.vars.new_value From 5b2711bbd37562a60fdc87ec91fc9357e4176e40 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Thu, 8 Aug 2024 01:00:26 +1200 Subject: [PATCH 183/482] pipx - add suffix parameter (#8675) * initial commit * add changelog frag * Add idempotency when using suffix --- .../fragments/8675-pipx-install-suffix.yml | 2 + plugins/module_utils/pipx.py | 2 +- plugins/modules/pipx.py | 47 ++++++++++++++----- tests/integration/targets/pipx/tasks/main.yml | 35 +++++++++++++- 4 files changed, 71 insertions(+), 15 deletions(-) create mode 100644 changelogs/fragments/8675-pipx-install-suffix.yml diff --git a/changelogs/fragments/8675-pipx-install-suffix.yml b/changelogs/fragments/8675-pipx-install-suffix.yml new file mode 100644 index 0000000000..4b5a9a99bc --- /dev/null +++ b/changelogs/fragments/8675-pipx-install-suffix.yml @@ -0,0 +1,2 @@ +minor_changes: + - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656). diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index a385ec93e7..3f493545d5 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -28,7 +28,6 @@ def pipx_runner(module, command, **kwargs): module, command=command, arg_formats=dict( - state=fmt.as_map(_state_map), name=fmt.as_list(), name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), @@ -43,6 +42,7 @@ def pipx_runner(module, command, **kwargs): _list=fmt.as_fixed(['list', '--include-injected', '--json']), editable=fmt.as_bool("--editable"), pip_args=fmt.as_opt_eq_val('--pip-args'), + suffix=fmt.as_opt_val('--suffix'), ), environ_update={'USE_EMOJI': '0'}, check_rc=True, diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index e82e4c32a2..372d4bec01 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -114,14 +114,20 @@ options: - Arbitrary arguments to pass directly to C(pip). 
type: str version_added: 4.6.0 + suffix: + description: + - Optional suffix for virtual environment and executable names. + - "B(Warning): C(pipx) documentation states this is an B(experimental) feature subject to change." + type: str + version_added: 9.3.0 notes: + - This module requires C(pipx) version 0.16.2.1 or above. + - Please note that C(pipx) requires Python 3.6 or above. - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - > This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR) passed using the R(environment Ansible keyword, playbooks_environment). - - This module requires C(pipx) version 0.16.2.1 or above. - - Please note that C(pipx) requires Python 3.6 or above. - > This first implementation does not verify whether a specified version constraint has been installed or not. 
Hence, when using version operators, C(pipx) module will always try to execute the operation, @@ -168,6 +174,10 @@ from ansible_collections.community.general.plugins.module_utils.pipx import pipx from ansible.module_utils.facts.compat import ansible_facts +def _make_name(name, suffix): + return name if suffix is None else "{0}{1}".format(name, suffix) + + class PipX(StateModuleHelper): output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] module = dict( @@ -188,6 +198,7 @@ class PipX(StateModuleHelper): executable=dict(type='path'), editable=dict(type='bool', default=False), pip_args=dict(type='str'), + suffix=dict(type='str'), ), required_if=[ ('state', 'present', ['name']), @@ -199,6 +210,9 @@ class PipX(StateModuleHelper): ('state', 'latest', ['name']), ('state', 'inject', ['name', 'inject_packages']), ], + required_by=dict( + suffix="name", + ), supports_check_mode=True, ) use_old_vardict = False @@ -222,9 +236,10 @@ class PipX(StateModuleHelper): installed = self.runner('_list', output_process=process_list).run(_list=1) if self.vars.name is not None: - app_list = installed.get(self.vars.name) + name = _make_name(self.vars.name, self.vars.suffix) + app_list = installed.get(name) if app_list: - return {self.vars.name: app_list} + return {name: app_list} else: return {} @@ -253,45 +268,50 @@ class PipX(StateModuleHelper): def state_install(self): if not self.vars.application or self.vars.force: self.changed = True - with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx: + args = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args, check_mode_skip=True) as ctx: ctx.run(name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) state_present = state_install def state_upgrade(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - 
self.do_raise("Trying to upgrade a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to upgrade a non-existent application: {0}".format(name)) if self.vars.force: self.changed = True with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: - ctx.run() + ctx.run(name=name) self._capture_results(ctx) def state_uninstall(self): if self.vars.application: + name = _make_name(self.vars.name, self.vars.suffix) with self.runner('state name', check_mode_skip=True) as ctx: - ctx.run() + ctx.run(name=name) self._capture_results(ctx) state_absent = state_uninstall def state_reinstall(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - self.do_raise("Trying to reinstall a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to reinstall a non-existent application: {0}".format(name)) self.changed = True with self.runner('state name python', check_mode_skip=True) as ctx: - ctx.run() + ctx.run(name=name) self._capture_results(ctx) def state_inject(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to inject packages into a non-existent application: {0}".format(name)) if self.vars.force: self.changed = True with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: - ctx.run() + ctx.run(name=name) self._capture_results(ctx) def state_uninstall_all(self): @@ -314,7 +334,8 @@ class PipX(StateModuleHelper): def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True - with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx: + args = 'state index_url install_deps force python 
system_site_packages editable pip_args suffix name_source' + with self.runner(args, check_mode_skip=True) as ctx: ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index ad5e14104b..aee8948b90 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -171,7 +171,7 @@ state: latest register: install_tox_latest_with_preinstall_again -- name: install application latest tox +- name: install application latest tox (force) community.general.pipx: name: tox state: latest @@ -339,3 +339,36 @@ assert: that: - install_pyinstaller is changed + +############################################################################## +# Test for issue 8656 +- name: ensure application conan2 is uninstalled + community.general.pipx: + name: conan2 + state: absent + +- name: Install Python Package conan with suffix 2 (conan2) + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2 + +- name: Install Python Package conan with suffix 2 (conan2) again + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2_again + +- name: cleanup conan2 + community.general.pipx: + name: conan2 + state: absent + +- name: check assertions + assert: + that: + - install_conan2 is changed + - "' - conan2' in install_conan2.stdout" + - install_conan2_again is not changed From 9a16eaf9ba657b7f944c085ed66c69b36752d3b4 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 7 Aug 2024 15:18:58 +0200 Subject: [PATCH 184/482] Fix pylint and pep8 issues exposed by latest ansible-core's ansible-test sanity checks (#8720) * Remove bad whitespace. * 'Fixing' various used-before-assignment issues that pylint flagged. 
--- plugins/action/iptables_state.py | 4 ++++ plugins/callback/opentelemetry.py | 1 + plugins/module_utils/ilo_redfish_utils.py | 1 + plugins/modules/aix_inittab.py | 1 + plugins/modules/cronvar.py | 1 + plugins/modules/iptables_state.py | 1 + plugins/modules/memset_zone_record.py | 1 + plugins/modules/proxmox.py | 2 ++ plugins/modules/proxmox_disk.py | 1 + plugins/modules/snmp_facts.py | 2 ++ tests/unit/plugins/inventory/test_xen_orchestra.py | 2 +- 11 files changed, 16 insertions(+), 1 deletion(-) diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py index 4a27ef8a01..5ea55af58c 100644 --- a/plugins/action/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -88,6 +88,10 @@ class ActionModule(ActionBase): max_timeout = self._connection._play_context.timeout module_args = self._task.args + async_status_args = {} + starter_cmd = None + confirm_cmd = None + if module_args.get('state', None) == 'restored': if not wrap_async: if not check_mode: diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index c6e8a87c16..2b2a5706fc 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -356,6 +356,7 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.OK) if host_data.status != 'included': # Support loops + enriched_error_message = None if 'results' in host_data.result._result: if host_data.status == 'failed': message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py index 9cb6e527a3..808583ae63 100644 --- a/plugins/module_utils/ilo_redfish_utils.py +++ b/plugins/module_utils/ilo_redfish_utils.py @@ -29,6 +29,7 @@ class iLORedfishUtils(RedfishUtils): result['ret'] = True data = response['data'] + current_session = None if 'Oem' in data: if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: current_session = 
data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py index d4c9aa0b56..79336bab8d 100644 --- a/plugins/modules/aix_inittab.py +++ b/plugins/modules/aix_inittab.py @@ -192,6 +192,7 @@ def main(): rmitab = module.get_bin_path('rmitab') chitab = module.get_bin_path('chitab') rc = 0 + err = None # check if the new entry exists current_entry = check_current_entry(module) diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py index fdcbc7d24b..66fa175498 100644 --- a/plugins/modules/cronvar.py +++ b/plugins/modules/cronvar.py @@ -183,6 +183,7 @@ class CronVar(object): fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') + path = None else: filed, path = tempfile.mkstemp(prefix='crontab') fileh = os.fdopen(filed, 'w') diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py index b0cc3bd3f6..c97b5694c9 100644 --- a/plugins/modules/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -459,6 +459,7 @@ def main(): if not os.access(b_path, os.R_OK): module.fail_json(msg="Source %s not readable" % path) state_to_restore = read_state(b_path) + cmd = None else: cmd = ' '.join(SAVECOMMAND) diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 8406d93d21..80838a26a3 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -181,6 +181,7 @@ def api_validation(args=None): https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) ''' failed_validation = False + error = None # priority can only be integer 0 > 999 if not 0 <= args['priority'] <= 999: diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 775f1a6b52..20e20e9a88 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -740,6 +740,8 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): # If not, we have proxmox create one using the special 
syntax except Exception: vol_string = "{storage}:{size}".format(storage=storage, size=size) + else: + raise AssertionError('Internal error') # 1.3 If we have a host_path, we don't have storage, a volume, or a size vol_string = ",".join( diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py index 83cdbeee58..979e551336 100644 --- a/plugins/modules/proxmox_disk.py +++ b/plugins/modules/proxmox_disk.py @@ -544,6 +544,7 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): # NOOP return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid) + timeout_str = "Reached timeout. Last line in task before timeout: %s" if (create == 'regular' and disk not in vm_config) or (create == 'forced'): # CREATE playbook_config = self.get_create_attributes() diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index aecc08f325..39c75bcd93 100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -307,6 +307,8 @@ def main(): if m_args['community'] is None: module.fail_json(msg='Community not set when using snmp version 2') + integrity_proto = None + privacy_proto = None if m_args['version'] == "v3": if m_args['username'] is None: module.fail_json(msg='Username not set when using snmp version 3') diff --git a/tests/unit/plugins/inventory/test_xen_orchestra.py b/tests/unit/plugins/inventory/test_xen_orchestra.py index bae038e807..d626fb988b 100644 --- a/tests/unit/plugins/inventory/test_xen_orchestra.py +++ b/tests/unit/plugins/inventory/test_xen_orchestra.py @@ -146,7 +146,7 @@ def serialize_groups(groups): return list(map(str, groups)) -@ pytest.fixture(scope="module") +@pytest.fixture(scope="module") def inventory(): r = InventoryModule() r.inventory = InventoryData() From 5322dd942e317e56a05dde642c6a7cd50f9d1d08 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 7 Aug 2024 16:31:12 +0200 Subject: [PATCH 185/482] Remove invalid cloud/gandi entry (#8725) Remove invalid cloud/gandi 
entry. --- tests/integration/targets/gandi_livedns/aliases | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/targets/gandi_livedns/aliases b/tests/integration/targets/gandi_livedns/aliases index f69a127f4d..bd1f024441 100644 --- a/tests/integration/targets/gandi_livedns/aliases +++ b/tests/integration/targets/gandi_livedns/aliases @@ -2,5 +2,4 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -cloud/gandi unsupported From b16263ebd7c0bd9a48acb10d0ae609e8baf7f704 Mon Sep 17 00:00:00 2001 From: Matthias Kunnen Date: Thu, 8 Aug 2024 11:21:36 +0200 Subject: [PATCH 186/482] Clarify contribution guide on integration tests (#8718) * Clarify contribution guide on integration tests * Improve test guide in CONTRIBUTING.md * Uppercase Docker Co-authored-by: Felix Fontein * Improve test_name documentation * Use working example in ansible-test integration docs Co-authored-by: Felix Fontein * Fix test_name in ansible-test integration being documented as required --------- Co-authored-by: Felix Fontein --- CONTRIBUTING.md | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5363b4daca..55a7098cc2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,6 +56,8 @@ cd ~/dev/ansible_collections/community/general Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+. 
+### Sanity tests + The following commands show how to run sanity tests: ```.bash @@ -66,6 +68,8 @@ ansible-test sanity --docker -v ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/ ``` +### Unit tests + The following commands show how to run unit tests: ```.bash @@ -79,13 +83,32 @@ ansible-test units --docker -v --python 3.8 ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py ``` +### Integration tests + The following commands show how to run integration tests: -```.bash -# Run integration tests for the interfaces_files module in a Docker container using the -# fedora35 operating system image (the supported images depend on your ansible-core version): -ansible-test integration --docker fedora35 -v interfaces_file +#### In Docker +Integration tests on Docker have the following parameters: +- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run + `ansible-test integration --help` and look for _target docker images_. +- `test_name` (optional): The name of the integration test. + For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`. + For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback. 
+```.bash +# Test all plugins/modules on fedora40 +ansible-test integration -v --docker fedora40 + +# Template +ansible-test integration -v --docker image_name test_name + +# Example community.general.ini_file module on fedora40 Docker image: +ansible-test integration -v --docker fedora40 ini_file +``` + +#### Without isolation + +```.bash # Run integration tests for the flattened lookup **without any isolation**: ansible-test integration -v lookup_flattened ``` From 9f340861ad7c3ff64bd8c0f7ed5b4f9c5e79854f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Aug 2024 18:19:58 +1200 Subject: [PATCH 187/482] django_manage: rely on package idempotency to install virtualenv (#8644) * rely on package idempotency to install virtualenv * improve os package name logic in integration tests * add os families debian, redhat * add os families archlinux * fix pkg name in archlinux * improvement from PR * typo * Update tests/integration/targets/setup_os_pkg_name/tasks/debian.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../targets/django_manage/meta/main.yml | 1 + .../targets/django_manage/tasks/main.yaml | 11 ++------ .../setup_os_pkg_name/tasks/alpine.yml | 11 ++++++++ .../setup_os_pkg_name/tasks/archlinux.yml | 11 ++++++++ .../setup_os_pkg_name/tasks/debian.yml | 10 +++++++ .../setup_os_pkg_name/tasks/default.yml | 11 ++++++++ .../targets/setup_os_pkg_name/tasks/main.yml | 26 +++++++++++++++++++ .../setup_os_pkg_name/tasks/redhat.yml | 10 +++++++ .../targets/setup_os_pkg_name/tasks/suse.yml | 11 ++++++++ 9 files changed, 93 insertions(+), 9 deletions(-) create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/debian.yml 
create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/default.yml create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/main.yml create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml create mode 100644 tests/integration/targets/setup_os_pkg_name/tasks/suse.yml diff --git a/tests/integration/targets/django_manage/meta/main.yml b/tests/integration/targets/django_manage/meta/main.yml index 2fcd152f95..4a216308a2 100644 --- a/tests/integration/targets/django_manage/meta/main.yml +++ b/tests/integration/targets/django_manage/meta/main.yml @@ -5,3 +5,4 @@ dependencies: - setup_pkg_mgr + - setup_os_pkg_name diff --git a/tests/integration/targets/django_manage/tasks/main.yaml b/tests/integration/targets/django_manage/tasks/main.yaml index 5307fb6642..9c2d4789e3 100644 --- a/tests/integration/targets/django_manage/tasks/main.yaml +++ b/tests/integration/targets/django_manage/tasks/main.yaml @@ -9,17 +9,10 @@ suffix: .django_manage register: tmp_django_root -- name: Install virtualenv on CentOS 8 +- name: Install virtualenv package: - name: virtualenv + name: "{{ os_package_name.virtualenv }}" state: present - when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8' - -- name: Install virtualenv on Arch Linux - pip: - name: virtualenv - state: present - when: ansible_os_family == 'Archlinux' - name: Install required library pip: diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml b/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml new file mode 100644 index 0000000000..bb17b5e5f1 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (alpine) + ansible.builtin.set_fact: + 
os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: py3-virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml b/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml new file mode 100644 index 0000000000..bb98583506 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (archlinux) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: python-virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml b/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml new file mode 100644 index 0000000000..6a20de1eeb --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml @@ -0,0 +1,10 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (debian) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: {} diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/default.yml b/tests/integration/targets/setup_os_pkg_name/tasks/default.yml new file mode 100644 index 0000000000..977d690437 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/default.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +- name: Update OS Package name fact (default) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/main.yml b/tests/integration/targets/setup_os_pkg_name/tasks/main.yml new file mode 100644 index 0000000000..91066cf53c --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/main.yml @@ -0,0 +1,26 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Make sure we have the ansible_os_family and ansible_distribution_version facts + ansible.builtin.setup: + gather_subset: distribution + when: ansible_facts == {} + +- name: Create OS Package name fact + ansible.builtin.set_fact: + os_package_name: {} + +- name: Include the files setting the package names + ansible.builtin.include_tasks: "{{ file }}" + loop_control: + loop_var: file + loop: + - "default.yml" + - "{{ ansible_os_family | lower }}.yml" diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml b/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml new file mode 100644 index 0000000000..022de8b961 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml @@ -0,0 +1,10 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (redhat) + 
ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: {} diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml b/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml new file mode 100644 index 0000000000..db2b0a1fa2 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (suse) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: python3-virtualenv From b9244130ef65dcca5b507e6d2e22be939cfb6935 Mon Sep 17 00:00:00 2001 From: Veikko Virrankoski <71337077+vvirrank@users.noreply.github.com> Date: Sun, 11 Aug 2024 21:10:35 +0300 Subject: [PATCH 188/482] Gitlab_project: add options for repository_access_level and container_expiration_policy (#8674) * gitlab_project: add option repository_access_level to disable repository * gitlab_project: add option container_expiration_policy to schedule registry cleanup * add chnagelog fragment * Fix changelog fragment PR id * Fix formatting * Add required suboptions for container_expiration_policy * Handle setting only a subset of policy attributes * Fix changed indicator when policy attribute has null value * Add descriptions to field clearing options in gitlab container_expiration_policy --- ...8674-add-gitlab-project-cleanup-policy.yml | 3 + plugins/modules/gitlab_project.py | 73 ++++++++++++++++++- 2 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml diff --git a/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml 
b/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml new file mode 100644 index 0000000000..f67e11a6b0 --- /dev/null +++ b/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674). + - gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674). diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index f1b96bfac5..4c4dbd77b5 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -180,6 +180,14 @@ options: - Supports project's default branch update since community.general 8.0.0. type: str version_added: "4.2.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" builds_access_level: description: - V(private) means that repository CI/CD is allowed only to project members. @@ -259,6 +267,41 @@ options: type: list elements: str version_added: "6.6.0" + container_expiration_policy: + description: + - Project cleanup policy for its container registry. + type: dict + suboptions: + cadence: + description: + - How often cleanup should be run. + type: str + choices: ["1d", "7d", "14d", "1month", "3month"] + enabled: + description: + - Enable the cleanup policy. + type: bool + keep_n: + description: + - Number of tags kept per image name. + - V(0) clears the field. + type: int + choices: [0, 1, 5, 10, 25, 50, 100] + older_than: + description: + - Destroy tags older than this. + - V(0d) clears the field. 
+ type: str + choices: ["0d", "7d", "14d", "30d", "90d"] + name_regex: + description: + - Destroy tags matching this regular expression. + type: str + name_regex_keep: + description: + - Keep tags matching this regular expression. + type: str + version_added: "9.3.0" ''' EXAMPLES = r''' @@ -375,6 +418,7 @@ class GitLabProject(object): 'squash_option': options['squash_option'], 'ci_config_path': options['ci_config_path'], 'shared_runners_enabled': options['shared_runners_enabled'], + 'repository_access_level': options['repository_access_level'], 'builds_access_level': options['builds_access_level'], 'forking_access_level': options['forking_access_level'], 'container_registry_access_level': options['container_registry_access_level'], @@ -384,6 +428,7 @@ class GitLabProject(object): 'infrastructure_access_level': options['infrastructure_access_level'], 'monitor_access_level': options['monitor_access_level'], 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + 'container_expiration_policy': options['container_expiration_policy'], } # topics was introduced on gitlab >=14 and replace tag_list. 
We get current gitlab version @@ -471,7 +516,20 @@ class GitLabProject(object): for arg_key, arg_value in arguments.items(): if arguments[arg_key] is not None: if getattr(project, arg_key) != arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) + if arg_key == 'container_expiration_policy': + old_val = getattr(project, arg_key) + final_val = {key: value for key, value in arg_value.items() if value is not None} + + if final_val.get('older_than') == '0d': + final_val['older_than'] = None + if final_val.get('keep_n') == 0: + final_val['keep_n'] = None + + if all(old_val.get(key) == value for key, value in final_val.items()): + continue + setattr(project, 'container_expiration_policy_attributes', final_val) + else: + setattr(project, arg_key, arg_value) changed = True return (changed, project) @@ -526,6 +584,7 @@ def main(): ci_config_path=dict(type='str'), shared_runners_enabled=dict(type='bool'), avatar_path=dict(type='path'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), @@ -536,6 +595,14 @@ def main(): monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), topics=dict(type='list', elements='str'), + container_expiration_policy=dict(type='dict', default=None, options=dict( + cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type='bool'), + keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type='str'), + name_regex_keep=dict(type='str'), + )), )) module = AnsibleModule( @@ -585,6 +652,7 @@ 
def main(): shared_runners_enabled = module.params['shared_runners_enabled'] avatar_path = module.params['avatar_path'] default_branch = module.params['default_branch'] + repository_access_level = module.params['repository_access_level'] builds_access_level = module.params['builds_access_level'] forking_access_level = module.params['forking_access_level'] container_registry_access_level = module.params['container_registry_access_level'] @@ -595,6 +663,7 @@ def main(): monitor_access_level = module.params['monitor_access_level'] security_and_compliance_access_level = module.params['security_and_compliance_access_level'] topics = module.params['topics'] + container_expiration_policy = module.params['container_expiration_policy'] # Set project_path to project_name if it is empty. if project_path is None: @@ -659,6 +728,7 @@ def main(): "ci_config_path": ci_config_path, "shared_runners_enabled": shared_runners_enabled, "avatar_path": avatar_path, + "repository_access_level": repository_access_level, "builds_access_level": builds_access_level, "forking_access_level": forking_access_level, "container_registry_access_level": container_registry_access_level, @@ -669,6 +739,7 @@ def main(): "monitor_access_level": monitor_access_level, "security_and_compliance_access_level": security_and_compliance_access_level, "topics": topics, + "container_expiration_policy": container_expiration_policy, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) From e7480ad29e4b5f2188df7d0df6d9a96fd1ccea31 Mon Sep 17 00:00:00 2001 From: Julien Lecomte Date: Mon, 12 Aug 2024 07:32:01 +0200 Subject: [PATCH 189/482] gitlab_project: Add some missing params (#8688) --- .../8688-gitlab_project-add-new-params.yml | 4 +++ plugins/modules/gitlab_project.py | 35 ++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8688-gitlab_project-add-new-params.yml 
diff --git a/changelogs/fragments/8688-gitlab_project-add-new-params.yml b/changelogs/fragments/8688-gitlab_project-add-new-params.yml new file mode 100644 index 0000000000..0c6b8e505a --- /dev/null +++ b/changelogs/fragments/8688-gitlab_project-add-new-params.yml @@ -0,0 +1,4 @@ +minor_changes: + - gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688). + - gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688). + - gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688). diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index 4c4dbd77b5..b5e8bccc23 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -302,6 +302,27 @@ options: - Keep tags matching this regular expression. type: str version_added: "9.3.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. + - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + service_desk_enabled: + description: + - Enable Service Desk. + type: bool + version_added: "9.3.0" + model_registry_access_level: + description: + - V(private) means that accessing model registry tab is allowed only to project members. + - V(disabled) means that accessing model registry tab is disabled. + - V(enabled) means that accessing model registry tab is enabled. 
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" ''' EXAMPLES = r''' @@ -429,6 +450,9 @@ class GitLabProject(object): 'monitor_access_level': options['monitor_access_level'], 'security_and_compliance_access_level': options['security_and_compliance_access_level'], 'container_expiration_policy': options['container_expiration_policy'], + 'pages_access_level': options['pages_access_level'], + 'service_desk_enabled': options['service_desk_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], } # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version @@ -603,6 +627,9 @@ def main(): name_regex=dict(type='str'), name_regex_keep=dict(type='str'), )), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), + model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), )) module = AnsibleModule( @@ -664,6 +691,9 @@ def main(): security_and_compliance_access_level = module.params['security_and_compliance_access_level'] topics = module.params['topics'] container_expiration_policy = module.params['container_expiration_policy'] + pages_access_level = module.params['pages_access_level'] + service_desk_enabled = module.params['service_desk_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] # Set project_path to project_name if it is empty. 
if project_path is None: @@ -702,7 +732,7 @@ def main(): if project_exists: gitlab_project.delete_project() module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") + module.exit_json(changed=False, msg="Project deleted or does not exist") if state == 'present': @@ -740,6 +770,9 @@ def main(): "security_and_compliance_access_level": security_and_compliance_access_level, "topics": topics, "container_expiration_policy": container_expiration_policy, + "pages_access_level": pages_access_level, + "service_desk_enabled": service_desk_enabled, + "model_registry_access_level": model_registry_access_level, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) From 57e28e5a73d28d86b7ce401d7691ff62812ccbda Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 12 Aug 2024 07:32:34 +0200 Subject: [PATCH 190/482] keycloak_identity_provider: get cleartext clientsecret (#8735) * get cleartext `clientSecret` from full realm info * add mock get_realm call to existing tests; add new no_change_when_present test * add changelog fragment * remove blank lines * Update changelog. 
--------- Co-authored-by: Felix Fontein --- ...r-get-cleartext-secret-from-realm-info.yml | 2 + plugins/modules/keycloak_identity_provider.py | 9 + .../test_keycloak_identity_provider.py | 304 +++++++++++++++++- 3 files changed, 304 insertions(+), 11 deletions(-) create mode 100644 changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml diff --git a/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml b/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml new file mode 100644 index 0000000000..ed3806bd5f --- /dev/null +++ b/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index 2eca3a06d2..bb958d9e94 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -445,6 +445,15 @@ def get_identity_provider_with_mappers(kc, alias, realm): idp = kc.get_identity_provider(alias, realm) if idp is not None: idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + # clientSecret returned by API when using `get_identity_provider(alias, realm)` is always ********** + # to detect changes to the secret, we get the actual cleartext secret from the full realm info + if 'config' in idp: + if 'clientSecret' in idp['config']: + for idp_from_realm in kc.get_realm_by_id(realm).get('identityProviders', []): + if idp_from_realm['internalId'] == idp['internalId']: + cleartext_secret = idp_from_realm.get('config', {}).get('clientSecret') + if cleartext_secret: + idp['config']['clientSecret'] = cleartext_secret if idp is None: idp = {} return idp diff --git a/tests/unit/plugins/modules/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/test_keycloak_identity_provider.py index 6fd258b8a3..a893a130a5 100644 --- a/tests/unit/plugins/modules/test_keycloak_identity_provider.py +++ b/tests/unit/plugins/modules/test_keycloak_identity_provider.py @@ -23,7 +23,7 @@ from ansible.module_utils.six import StringIO @contextmanager def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None, get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None, - delete_identity_provider_mapper=None): + delete_identity_provider_mapper=None, get_realm_by_id=None): """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server Patches the `login` and `_post_json` methods @@ -55,9 
+55,11 @@ def patch_keycloak_api(get_identity_provider, create_identity_provider=None, upd as mock_update_identity_provider_mapper: with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \ as mock_delete_identity_provider_mapper: - yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ - mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ - mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper + with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) \ + as mock_get_realm_by_id: + yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ + mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ + mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper, mock_get_realm_by_id def get_response(object_with_future_response, method, get_id_call_count): @@ -200,6 +202,38 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "last_name" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + ] 
return_value_idp_created = [None] return_value_mapper_created = [None, None] changed = True @@ -210,15 +244,17 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created) \ + create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2) @@ -444,6 +480,68 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "last_name" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + 
"displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + ] return_value_idp_updated = [None] return_value_mapper_updated = [None] return_value_mapper_created = [None] @@ -456,15 +554,16 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created) \ + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + 
mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 5) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) self.assertEqual(len(mock_update_identity_provider.mock_calls), 1) self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1) @@ -472,6 +571,156 @@ class TestKeycloakIdentityProvider(ModuleTestCase): # Verify that the module's changed status matches what is expected self.assertIs(exec_info.exception.args[0]['changed'], changed) + def test_no_change_when_present(self): + """Update existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + 'mappers': [{ + 'name': "username", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "username", + 'user.attribute': "username", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + 
"authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + ] + return_value_mappers_get = [ + [{ + 'config': { + 'claim': "username", + 'syncMode': "INHERIT", + 'user.attribute': "username" + }, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'name': "username" + }], + [{ + 'config': { + 'claim': "username", + 'syncMode': "INHERIT", + 'user.attribute': "username" + }, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'name': "username" + }] + ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": 
"oidc", + "storeToken": False, + "trustEmail": False, + } + ] + } + ] + return_value_idp_updated = [None] + return_value_mapper_updated = [None] + return_value_mapper_created = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) + self.assertEqual(len(mock_update_identity_provider.mock_calls), 0) + self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 0) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + def test_delete_when_absent(self): """Remove an absent identity provider""" @@ -497,7 +746,7 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, 
mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -560,6 +809,38 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "email" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "alias": "oidc", + "displayName": "", + "internalId": "2bca4192-e816-4beb-bcba-190164eb55b8", + "providerId": "oidc", + "enabled": True, + "updateProfileFirstLoginMode": "on", + "trustEmail": False, + "storeToken": False, + "addReadTokenRoleOnCreate": False, + "authenticateByDefault": False, + "linkOnly": False, + "config": { + "validateSignature": "false", + "pkceEnabled": "false", + "tokenUrl": "https://localhost:8000", + "clientId": "asdf", + "authorizationUrl": "https://localhost:8000", + "clientAuthMethod": "client_secret_post", + "clientSecret": "real_secret", + "guiOrder": "0" + } + }, + ] + }, + ] return_value_idp_deleted = [None] changed = True @@ -569,15 +850,16 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - delete_identity_provider=return_value_idp_deleted) \ + delete_identity_provider=return_value_idp_deleted, get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) 
self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1) # Verify that the module's changed status matches what is expected From 8989b6c4d4b7588c7f0c97185fb18bb9bf9d08c2 Mon Sep 17 00:00:00 2001 From: Adam Tygart Date: Mon, 12 Aug 2024 00:33:07 -0500 Subject: [PATCH 191/482] Namespace the passwordstore lockfile (#8689) * Namespace the lockfile When passwordstore needs to grab a lock, it creates a statically file (within /tmp, typically). This is unfortunate, when there might be more than one user using the passwordstore functionality on that machine. Prepend the user to the filename, to bypass further issues. * Update plugins/lookup/passwordstore.py specifically reference the argument number in the format string. Co-authored-by: Felix Fontein * Add changelog fragment for PR#8689 * Update 8689-passwordstore-lock-naming.yml I was sure that was a copy/paste. * Update changelogs/fragments/8689-passwordstore-lock-naming.yml specify the type of plugin Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8689-passwordstore-lock-naming.yml | 2 ++ plugins/lookup/passwordstore.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8689-passwordstore-lock-naming.yml diff --git a/changelogs/fragments/8689-passwordstore-lock-naming.yml b/changelogs/fragments/8689-passwordstore-lock-naming.yml new file mode 100644 index 0000000000..c5c9a82d78 --- /dev/null +++ b/changelogs/fragments/8689-passwordstore-lock-naming.yml @@ -0,0 +1,2 @@ +minor_changes: + - passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689). 
diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index a1b0842a7b..510bdbec3d 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -469,7 +469,8 @@ class LookupModule(LookupBase): def opt_lock(self, type): if self.get_option('lock') == type: tmpdir = os.environ.get('TMPDIR', '/tmp') - lockfile = os.path.join(tmpdir, '.passwordstore.lock') + user = os.environ.get('USER') + lockfile = os.path.join(tmpdir, '.{0}.passwordstore.lock'.format(user)) with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield From 158947f5e50e9d74fe0b01c517250aef3afdf9b7 Mon Sep 17 00:00:00 2001 From: Matthias Kunnen Date: Mon, 12 Aug 2024 07:33:54 +0200 Subject: [PATCH 192/482] Add support for multiple locales in locale_gen (#8682) * Add support for multiple locales in locale_gen * Add changelog fragment * Remove extraneous newlines * Remove typehints * Add 'before version' to names documentation * Remove extraneous comment * Replace fstring with .format * Refer to issue in changelog fragment Co-authored-by: Felix Fontein * Clarify version Co-authored-by: Felix Fontein * Add newline between examples Co-authored-by: Felix Fontein * Use semantic markup for locale value Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8682-locale-gen-multiple.yaml | 2 + plugins/modules/locale_gen.py | 93 +++++++++++++------ .../targets/locale_gen/vars/main.yml | 9 ++ 3 files changed, 75 insertions(+), 29 deletions(-) create mode 100644 changelogs/fragments/8682-locale-gen-multiple.yaml diff --git a/changelogs/fragments/8682-locale-gen-multiple.yaml b/changelogs/fragments/8682-locale-gen-multiple.yaml new file mode 100644 index 0000000000..139f372353 --- /dev/null +++ b/changelogs/fragments/8682-locale-gen-multiple.yaml @@ -0,0 +1,2 @@ +minor_changes: + - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, 
https://github.com/ansible-collections/community.general/pull/8682). diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index fe501e0239..8886cdc9cd 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -25,9 +25,11 @@ attributes: support: none options: name: - type: str + type: list + elements: str description: - - Name and encoding of the locale, such as "en_GB.UTF-8". + - Name and encoding of the locales, such as V(en_GB.UTF-8). + - Before community.general 9.3.0, this was a string. Using a string still works. required: true state: type: str @@ -44,6 +46,13 @@ EXAMPLES = ''' community.general.locale_gen: name: de_CH.UTF-8 state: present + +- name: Ensure multiple locales exist + community.general.locale_gen: + name: + - en_GB.UTF-8 + - nl_NL.UTF-8 + state: present ''' import os @@ -74,7 +83,7 @@ class LocaleGen(StateModuleHelper): output_params = ["name"] module = dict( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type="list", elements="str", required=True), state=dict(type='str', default='present', choices=['absent', 'present']), ), supports_check_mode=True, @@ -91,9 +100,7 @@ class LocaleGen(StateModuleHelper): self.LOCALE_SUPPORTED, self.LOCALE_GEN )) - if not self.is_available(): - self.do_raise("The locale you've entered is not available on your system.") - + self.assert_available() self.vars.set("is_present", self.is_present(), output=False) self.vars.set("state_tracking", self._state_name(self.vars.is_present), output=False, change=True) @@ -104,8 +111,8 @@ class LocaleGen(StateModuleHelper): def _state_name(present): return "present" if present else "absent" - def is_available(self): - """Check if the given locale is available on the system. This is done by + def assert_available(self): + """Check if the given locales are available on the system. 
This is done by checking either : * if the locale is present in /etc/locales.gen * or if the locale is present in /usr/share/i18n/SUPPORTED""" @@ -121,18 +128,35 @@ class LocaleGen(StateModuleHelper): res = [re_compiled.match(line) for line in lines] if self.verbosity >= 4: self.vars.available_lines = lines - if any(r.group("locale") == self.vars.name for r in res if r): - return True + + locales_not_found = [] + for locale in self.vars.name: + # Check if the locale is not found in any of the matches + if not any(match and match.group("locale") == locale for match in res): + locales_not_found.append(locale) + # locale may be installed but not listed in the file, for example C.UTF-8 in some systems - return self.is_present() + locales_not_found = self.locale_get_not_present(locales_not_found) + + if locales_not_found: + self.do_raise("The following locales you've entered are not available on your system: {0}".format(', '.join(locales_not_found))) def is_present(self): + return not self.locale_get_not_present(self.vars.name) + + def locale_get_not_present(self, locales): runner = locale_runner(self.module) with runner() as ctx: rc, out, err = ctx.run() if self.verbosity >= 4: self.vars.locale_run_info = ctx.run_info - return any(self.fix_case(self.vars.name) == self.fix_case(line) for line in out.splitlines()) + + not_found = [] + for locale in locales: + if not any(self.fix_case(locale) == self.fix_case(line) for line in out.splitlines()): + not_found.append(locale) + + return not_found def fix_case(self, name): """locale -a might return the encoding in either lower or upper case. @@ -141,39 +165,50 @@ class LocaleGen(StateModuleHelper): name = name.replace(s, r) return name - def set_locale(self, name, enabled=True): + def set_locale(self, names, enabled=True): """ Sets the state of the locale. Defaults to enabled. 
""" - search_string = r'#?\s*%s (?P.+)' % re.escape(name) - if enabled: - new_string = r'%s \g' % (name) - else: - new_string = r'# %s \g' % (name) - re_search = re.compile(search_string) - with open("/etc/locale.gen", "r") as fr: - lines = [re_search.sub(new_string, line) for line in fr] - with open("/etc/locale.gen", "w") as fw: - fw.write("".join(lines)) + with open("/etc/locale.gen", 'r') as fr: + lines = fr.readlines() - def apply_change(self, targetState, name): + locale_regexes = [] + + for name in names: + search_string = r'^#?\s*%s (?P.+)' % re.escape(name) + if enabled: + new_string = r'%s \g' % (name) + else: + new_string = r'# %s \g' % (name) + re_search = re.compile(search_string) + locale_regexes.append([re_search, new_string]) + + for i in range(len(lines)): + for [search, replace] in locale_regexes: + lines[i] = search.sub(replace, lines[i]) + + # Write the modified content back to the file + with open("/etc/locale.gen", 'w') as fw: + fw.writelines(lines) + + def apply_change(self, targetState, names): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. + names -- Names list including encoding such as de_CH.UTF-8. """ - self.set_locale(name, enabled=(targetState == "present")) + self.set_locale(names, enabled=(targetState == "present")) runner = locale_gen_runner(self.module) with runner() as ctx: ctx.run() - def apply_change_ubuntu(self, targetState, name): + def apply_change_ubuntu(self, targetState, names): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. + names -- Name list including encoding such as de_CH.UTF-8. 
""" runner = locale_gen_runner(self.module) @@ -189,7 +224,7 @@ class LocaleGen(StateModuleHelper): with open("/var/lib/locales/supported.d/local", "w") as fw: for line in content: locale, charset = line.split(' ') - if locale != name: + if locale not in names: fw.write(line) # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! diff --git a/tests/integration/targets/locale_gen/vars/main.yml b/tests/integration/targets/locale_gen/vars/main.yml index 44327ddd31..23358e6374 100644 --- a/tests/integration/targets/locale_gen/vars/main.yml +++ b/tests/integration/targets/locale_gen/vars/main.yml @@ -15,3 +15,12 @@ locale_list_basic: - localegen: eo locales: [eo] skip_removal: false + - localegen: + - ar_BH.UTF-8 + - tr_CY.UTF-8 + locales: + - ar_BH.UTF-8 + - ar_BH.utf8 + - tr_CY.UTF-8 + - tr_CY.utf8 + skip_removal: false From 76d0222a83d7986e8fa6f693e87218cd0931f835 Mon Sep 17 00:00:00 2001 From: John Byrne <6145795+johnbyrneio@users.noreply.github.com> Date: Mon, 12 Aug 2024 01:34:32 -0400 Subject: [PATCH 193/482] homebrew_cask: fix upgrade_all changed when nothing upgraded (#8708) * homebrew_cask: fix upgrade_all changed when nothing upgraded * Add changelog fragment * Update changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml Co-authored-by: Felix Fontein * Add .strip() to upgrade output check Co-authored-by: Felix Fontein --------- Co-authored-by: John Byrne Co-authored-by: Felix Fontein --- .../fragments/8708-homebrew_cask-fix-upgrade-all.yml | 2 ++ plugins/modules/homebrew_cask.py | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml diff --git a/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml b/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml new file mode 100644 index 0000000000..6a0cd74302 --- /dev/null +++ b/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml @@ -0,0 +1,2 @@ 
+bugfixes: + - homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). \ No newline at end of file diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index dc9aea5db8..9902cb1373 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -534,7 +534,12 @@ class HomebrewCask(object): rc, out, err = self.module.run_command(cmd) if rc == 0: - if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + # 'brew upgrade --cask' does not output anything if no casks are upgraded + if not out.strip(): + self.message = 'Homebrew casks already upgraded.' + + # handle legacy 'brew cask upgrade' + elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): self.message = 'Homebrew casks already upgraded.' else: From 0f59bb7a99258be7f885f8f77e5fcf1de8a0b0e5 Mon Sep 17 00:00:00 2001 From: Scott Langendyk Date: Mon, 12 Aug 2024 01:35:43 -0400 Subject: [PATCH 194/482] Get interfaces for Proxmox LXC containers (#8713) * Get interfaces for Proxmox LXC containers * Add changelog * Don't use bare except * Update changelog from suggestion Co-authored-by: Felix Fontein * Only lookup interfaces for running containers * Ignore not implemented status * Check that key exists in properties dict * define ignore errors in mock * Use not in --------- Co-authored-by: Felix Fontein --- .../fragments/8713-proxmox_lxc_interfaces.yml | 2 ++ plugins/inventory/proxmox.py | 31 +++++++++++++++++++ tests/unit/plugins/inventory/test_proxmox.py | 2 +- 3 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8713-proxmox_lxc_interfaces.yml diff --git a/changelogs/fragments/8713-proxmox_lxc_interfaces.yml b/changelogs/fragments/8713-proxmox_lxc_interfaces.yml new file mode 100644 index 0000000000..32c475157e --- /dev/null +++ 
b/changelogs/fragments/8713-proxmox_lxc_interfaces.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). \ No newline at end of file diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 774833c488..a4b05b57ed 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -362,6 +362,34 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): except Exception: return None + def _get_lxc_interfaces(self, properties, node, vmid): + status_key = self._fact('status') + + if status_key not in properties or not properties[status_key] == 'running': + return + + ret = self._get_json("%s/api2/json/nodes/%s/lxc/%s/interfaces" % (self.proxmox_url, node, vmid), ignore_errors=[501]) + if not ret: + return + + result = [] + + for iface in ret: + result_iface = { + 'name': iface['name'], + 'hwaddr': iface['hwaddr'] + } + + if 'inet' in iface: + result_iface['inet'] = iface['inet'] + + if 'inet6' in iface: + result_iface['inet6'] = iface['inet6'] + + result.append(result_iface) + + properties[self._fact('lxc_interfaces')] = result + def _get_agent_network_interfaces(self, node, vmid, vmtype): result = [] @@ -526,6 +554,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._get_vm_config(properties, node, vmid, ittype, name) self._get_vm_snapshots(properties, node, vmid, ittype, name) + if ittype == 'lxc': + self._get_lxc_interfaces(properties, node, vmid) + # ensure the host satisfies filters if not self._can_add_host(name, properties): return None diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index ea6c84bcda..b8358df226 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -37,7 +37,7 @@ def get_auth(): # NOTE: when updating/adding replies to this function, # be 
sure to only add only the _contents_ of the 'data' dict in the API reply -def get_json(url): +def get_json(url, ignore_errors=None): if url == "https://localhost:8006/api2/json/nodes": # _get_nodes return [{"type": "node", From 73b54139d65c94aac5f5b4ba251afa2d676cb12e Mon Sep 17 00:00:00 2001 From: Wilfried ROSET Date: Mon, 12 Aug 2024 07:42:05 +0200 Subject: [PATCH 195/482] fix(opentelemetry): adjust default value for `store_spans_in_file` causing traces to be produced to a file named `None` (#8741) * fix(opentelemetry): adjust default value for `store_spans_in_file` causing traces to be produced to a file named `None` The commit 5f481939d introduced `store_spans_in_file` with the default value `None` as a string. This causes the value of `store_spans_in_file` to be a not empty string, value=None as a string and not a null value. The rest of the code check if the store_spans_in_file is not null which squeezes the rest of the code. The following commit set the default value as an empty string. Signed-off-by: Wilfried Roset * fix(opentelemetry): No default value is better, reword changelog Signed-off-by: Wilfried Roset --------- Signed-off-by: Wilfried Roset --- changelogs/fragments/8741-fix-opentelemetry-callback.yml | 2 ++ plugins/callback/opentelemetry.py | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8741-fix-opentelemetry-callback.yml diff --git a/changelogs/fragments/8741-fix-opentelemetry-callback.yml b/changelogs/fragments/8741-fix-opentelemetry-callback.yml new file mode 100644 index 0000000000..1b5e63a89f --- /dev/null +++ b/changelogs/fragments/8741-fix-opentelemetry-callback.yml @@ -0,0 +1,2 @@ +minor_changes: + - opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741). 
diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 2b2a5706fc..8dc627c214 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -85,7 +85,6 @@ DOCUMENTATION = ''' key: disable_attributes_in_logs version_added: 7.1.0 store_spans_in_file: - default: None type: str description: - It stores the exported spans in the given file From 2942eda8e0b2a37203e19eb9b1fe1704a91f2538 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 12 Aug 2024 07:55:17 +0200 Subject: [PATCH 196/482] keycloak_user_federation: add mapper removal (#8695) * add unwanted mapper removal * check for mapper updates in already fetched data to remove unnecessary API calls * added mock answers and updated request count to match the added delete and fetch after_comp calls * fix sanity issues * add changelog fragment * removed automatic field numbering from format * replace filter expression with list comprehension Co-authored-by: Felix Fontein * add module name, link to issue and link to PR to changelog Co-authored-by: Felix Fontein * Use list comprehension. 
--------- Co-authored-by: Felix Fontein --- ...eycloak_user_federation-mapper-removal.yml | 2 + plugins/modules/keycloak_user_federation.py | 77 ++++++++++++------- .../modules/test_keycloak_user_federation.py | 54 +++++++++++-- 3 files changed, 98 insertions(+), 35 deletions(-) create mode 100644 changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml diff --git a/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml b/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml new file mode 100644 index 0000000000..b518d59e36 --- /dev/null +++ b/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index f87ef936ce..00f407ec03 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -892,11 +892,11 @@ def main(): if cid is None: old_mapper = {} elif change.get('id') is not None: - old_mapper = kc.get_component(change['id'], realm) + old_mapper = next((before_mapper for before_mapper in before_mapper.get('mappers', []) if before_mapper["id"] == change['id']), None) if old_mapper is None: old_mapper = {} else: - found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm) + found = [before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper['name'] == change['name']] if len(found) > 1: module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=change['name'])) if len(found) == 1: @@ -905,10 +905,10 @@ def main(): old_mapper = {} new_mapper = old_mapper.copy() new_mapper.update(change) - if new_mapper != old_mapper: - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) + # changeset contains all desired mappers: those existing, to update or to create + if changeset.get('mappers') is None: + changeset['mappers'] = list() + changeset['mappers'].append(new_mapper) # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) desired_comp = before_comp.copy() @@ -931,42 +931,51 @@ def main(): # Process a creation result['changed'] = True - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) - if module.check_mode: + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_comp)) module.exit_json(**result) # create it - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop('mappers', []) after_comp = kc.create_component(desired_comp, realm) - cid = after_comp['id'] + updated_mappers = [] + # when creating a user federation, keycloak automatically creates default mappers + default_mappers = kc.get_components(urlencode(dict(parent=cid)), realm) - for mapper in updated_mappers: - found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm) + # create new mappers or update existing default mappers + for desired_mapper in desired_mappers: + found = [default_mapper for default_mapper in default_mappers if default_mapper['name'] == desired_mapper['name']] if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=mapper['name'])) + module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=desired_mapper['name'])) if len(found) == 1: old_mapper = found[0] else: old_mapper = {} new_mapper = old_mapper.copy() - new_mapper.update(mapper) + new_mapper.update(desired_mapper) if new_mapper.get('id') is not None: kc.update_component(new_mapper, realm) + updated_mappers.append(new_mapper) else: if new_mapper.get('parentId') is None: - new_mapper['parentId'] = after_comp['id'] - mapper = kc.create_component(new_mapper, realm) + new_mapper['parentId'] = cid + updated_mappers.append(kc.create_component(new_mapper, realm)) - after_comp['mappers'] = updated_mappers + # we remove all unwanted default mappers + # we use ids so we dont accidently remove one of the previously updated default mapper + for default_mapper in default_mappers: + if not default_mapper['id'] in [x['id'] for x in updated_mappers]: + kc.delete_component(default_mapper['id'], realm) + + after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + if module._diff: + result['diff'] = dict(before='', after=sanitize(after_comp)) result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) + result['msg'] = "User federation {id} has been created".format(id=cid) module.exit_json(**result) else: @@ -990,22 +999,32 @@ def main(): module.exit_json(**result) # do the update - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop('mappers', []) kc.update_component(desired_comp, realm) - after_comp = kc.get_component(cid, realm) - for mapper in updated_mappers: + for before_mapper in before_comp.get('mappers', []): + # remove unwanted existing mappers that will not be updated + if not before_mapper['id'] in [x['id'] for x in desired_mappers]: + kc.delete_component(before_mapper['id'], realm) + + for mapper in desired_mappers: + if mapper in before_comp.get('mappers', []): + continue if mapper.get('id') is not None: 
kc.update_component(mapper, realm) else: if mapper.get('parentId') is None: mapper['parentId'] = desired_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) + kc.create_component(mapper, realm) + after_comp = kc.get_component(cid, realm) + after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + after_comp_sanitized = sanitize(after_comp) + before_comp_sanitized = sanitize(before_comp) + result['end_state'] = after_comp_sanitized + if module._diff: + result['diff'] = dict(before=before_comp_sanitized, after=after_comp_sanitized) + result['changed'] = before_comp_sanitized != after_comp_sanitized result['msg'] = "User federation {id} has been updated".format(id=cid) module.exit_json(**result) diff --git a/tests/unit/plugins/modules/test_keycloak_user_federation.py b/tests/unit/plugins/modules/test_keycloak_user_federation.py index 523ef9f210..81fd65e108 100644 --- a/tests/unit/plugins/modules/test_keycloak_user_federation.py +++ b/tests/unit/plugins/modules/test_keycloak_user_federation.py @@ -144,8 +144,9 @@ class TestKeycloakUserFederation(ModuleTestCase): } } ] + # get before_comp, get default_mapper, get after_mapper return_value_components_get = [ - [], [] + [], [], [] ] changed = True @@ -159,7 +160,7 @@ class TestKeycloakUserFederation(ModuleTestCase): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) self.assertEqual(len(mock_create_component.mock_calls), 1) self.assertEqual(len(mock_update_component.mock_calls), 0) @@ -228,6 +229,7 @@ class TestKeycloakUserFederation(ModuleTestCase): } } ], + [], [] ] return_value_component_get = [ @@ -281,7 +283,7 @@ class TestKeycloakUserFederation(ModuleTestCase): with self.assertRaises(AnsibleExitJson) as 
exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 2) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 0) self.assertEqual(len(mock_update_component.mock_calls), 1) @@ -344,7 +346,47 @@ class TestKeycloakUserFederation(ModuleTestCase): ] } return_value_components_get = [ - [], [] + [], + # exemplary default mapper created by keylocak + [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "77e1763f-c51a-4286-bade-75577d64803c", + "name": "email", + "parentId": "e5f48aa3-b56b-4983-a8ad-2c7b8b5e77cb", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + }, + ], + [ + { + "id": "2dfadafd-8b34-495f-a98b-153e71a22311", + "name": "full name", + "providerId": "full-name-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper", + "parentId": "eb691537-b73c-4cd8-b481-6031c26499d8", + "config": { + "ldap.full.name.attribute": [ + "cn" + ], + "read.only": [ + "true" + ], + "write.only": [ + "false" + ] + } + } + ] + ] + return_value_component_delete = [ + None ] return_value_component_create = [ { @@ -462,11 +504,11 @@ class TestKeycloakUserFederation(ModuleTestCase): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 2) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) self.assertEqual(len(mock_create_component.mock_calls), 2) self.assertEqual(len(mock_update_component.mock_calls), 0) - self.assertEqual(len(mock_delete_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) # Verify that the module's changed status 
matches what is expected self.assertIs(exec_info.exception.args[0]['changed'], changed) From 2eec853e9e9e4838ea796a417b8bd4a9134c81ee Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 12 Aug 2024 09:22:02 +0200 Subject: [PATCH 197/482] The next minor release will be 9.4.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 57232d9e56..e625445649 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.3.0 +version: 9.4.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 4cb4c608d0fe8c95664875a2f5be0263fe32c89f Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 12 Aug 2024 12:27:45 +0200 Subject: [PATCH 198/482] README: Add Communication section with Forum information (#8732) * README: Add Communication section with Forum information * Changelog fragment isn't needed for README change. * Distinguish between Get Help and the community-general tag. * Update links. --------- Co-authored-by: Felix Fontein --- README.md | 24 ++++++++++++------------ docs/docsite/links.yml | 5 ++++- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index b5a6fcfa24..efdb33f065 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,18 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. +## Communication + +* Join the Ansible forum: + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. 
+ * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. + * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. + * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. + +* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes. + +For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). + ## Tested with Ansible Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. @@ -98,18 +110,6 @@ It is necessary for maintainers of this collection to be subscribed to: They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. - -Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). - -We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. 
- -For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). - -For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). - ## Publishing New Version See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index 65c8992bba..d9d9a27059 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -26,6 +26,9 @@ communication: - topic: Ansible Project List url: https://groups.google.com/g/ansible-project forums: - - topic: Ansible Forum + - topic: "Ansible Forum: General usage and support questions" # The following URL directly points to the "Get Help" section url: https://forum.ansible.com/c/help/6/none + - topic: "Ansible Forum: Discussions about the collection itself, not for specific modules or plugins" + # The following URL directly points to the "community-general" tag + url: https://forum.ansible.com/tag/community-general From 34519a5ecbac2c7a29486584f2d9ca9fd7b71eb0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 12 Aug 2024 17:04:49 +0200 Subject: [PATCH 199/482] Improve communication info (#8756) Improve communication info. 
--- docs/docsite/links.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index d9d9a27059..32d56eacc6 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -9,6 +9,8 @@ edit_on_github: path_prefix: '' extra_links: + - description: Ask for help + url: https://forum.ansible.com/c/help/6/none - description: Submit a bug report url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml - description: Request a feature From e1b5ddb050d7b27db276e6a2eebddce22bad0402 Mon Sep 17 00:00:00 2001 From: Julien Lecomte Date: Wed, 14 Aug 2024 19:45:21 +0200 Subject: [PATCH 200/482] gitlab_project: sort parameters in order to avoid futur merge conflicts (#8759) --- .../8759-gitlab_project-sort-params.yml | 2 + plugins/modules/gitlab_project.py | 686 +++++++++--------- 2 files changed, 345 insertions(+), 343 deletions(-) create mode 100644 changelogs/fragments/8759-gitlab_project-sort-params.yml diff --git a/changelogs/fragments/8759-gitlab_project-sort-params.yml b/changelogs/fragments/8759-gitlab_project-sort-params.yml new file mode 100644 index 0000000000..2ff2ed18a7 --- /dev/null +++ b/changelogs/fragments/8759-gitlab_project-sort-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759). diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index b5e8bccc23..c98e442cb0 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -34,160 +34,17 @@ attributes: support: none options: - group: - description: - - Id or the full path of the group of which this projects belongs to. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. 
- - If not supplied, name will be used. - type: str - description: - description: - - An description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). - - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. - - Possible values are true and false. - type: bool - default: true - merge_requests_enabled: - description: - - If merge requests can be made or not. - - Possible values are true and false. - type: bool - default: true - wiki_enabled: - description: - - If an wiki for this project should be available or not. - type: bool - default: true - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: true - visibility: - description: - - V(private) Project access must be granted explicitly for each user. - - V(internal) The project can be cloned by any logged in user. - - V(public) The project can be cloned without any authentication. - default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into gitlab. - - GitLab server needs read access to this git repository. - required: false - type: str - state: - description: - - Create or delete project. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git large file systems to manages large files such - as audio, video, and graphics files. 
- type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" allow_merge_on_skipped_pipeline: description: - Allow merge when skipped pipelines exist. type: bool version_added: "3.4.0" - only_allow_merge_if_all_discussions_are_resolved: - description: - - All discussions on a merge request (MR) have to be resolved. - type: bool - version_added: "3.4.0" - only_allow_merge_if_pipeline_succeeds: - description: - - Only allow merges if pipeline succeeded. - type: bool - version_added: "3.4.0" - packages_enabled: - description: - - Enable GitLab package repository. - type: bool - version_added: "3.4.0" - remove_source_branch_after_merge: - description: - - Remove the source branch after merge. - type: bool - version_added: "3.4.0" - squash_option: - description: - - Squash commits when merging. - type: str - choices: ["never", "always", "default_off", "default_on"] - version_added: "3.4.0" - ci_config_path: - description: - - Custom path to the CI configuration file for this project. - type: str - version_added: "3.7.0" - shared_runners_enabled: - description: - - Enable shared runners for this project. - type: bool - version_added: "3.7.0" avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. type: path version_added: "4.2.0" - default_branch: - description: - - The default branch name for this project. - - For project creation, this option requires O(initialize_with_readme=true). - - For project update, the branch must exist. - - Supports project's default branch update since community.general 8.0.0. - type: str - version_added: "4.2.0" - repository_access_level: - description: - - V(private) means that accessing repository is allowed only to project members. - - V(disabled) means that accessing repository is disabled. 
- - V(enabled) means that accessing repository is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "9.3.0" builds_access_level: description: - V(private) means that repository CI/CD is allowed only to project members. @@ -196,77 +53,11 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.2.0" - forking_access_level: + ci_config_path: description: - - V(private) means that repository forks is allowed only to project members. - - V(disabled) means that repository forks are disabled. - - V(enabled) means that repository forks are enabled. + - Custom path to the CI configuration file for this project. type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" - container_registry_access_level: - description: - - V(private) means that container registry is allowed only to project members. - - V(disabled) means that container registry is disabled. - - V(enabled) means that container registry is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" - releases_access_level: - description: - - V(private) means that accessing release is allowed only to project members. - - V(disabled) means that accessing release is disabled. - - V(enabled) means that accessing release is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - environments_access_level: - description: - - V(private) means that deployment to environment is allowed only to project members. - - V(disabled) means that deployment to environment is disabled. - - V(enabled) means that deployment to environment is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - feature_flags_access_level: - description: - - V(private) means that feature rollout is allowed only to project members. - - V(disabled) means that feature rollout is disabled. - - V(enabled) means that feature rollout is enabled. 
- type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - infrastructure_access_level: - description: - - V(private) means that configuring infrastructure is allowed only to project members. - - V(disabled) means that configuring infrastructure is disabled. - - V(enabled) means that configuring infrastructure is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - monitor_access_level: - description: - - V(private) means that monitoring health is allowed only to project members. - - V(disabled) means that monitoring health is disabled. - - V(enabled) means that monitoring health is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - security_and_compliance_access_level: - description: - - V(private) means that accessing security and complicance tab is allowed only to project members. - - V(disabled) means that accessing security and complicance tab is disabled. - - V(enabled) means that accessing security and complicance tab is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" - topics: - description: - - A topic or list of topics to be assigned to a project. - - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)). - type: list - elements: str - version_added: "6.6.0" + version_added: "3.7.0" container_expiration_policy: description: - Project cleanup policy for its container registry. @@ -302,19 +93,103 @@ options: - Keep tags matching this regular expression. type: str version_added: "9.3.0" - pages_access_level: + container_registry_access_level: description: - - V(private) means that accessing pages tab is allowed only to project members. - - V(disabled) means that accessing pages tab is disabled. - - V(enabled) means that accessing pages tab is enabled. + - V(private) means that container registry is allowed only to project members. 
+ - V(disabled) means that container registry is disabled. + - V(enabled) means that container registry is enabled. type: str choices: ["private", "disabled", "enabled"] - version_added: "9.3.0" - service_desk_enabled: + version_added: "6.2.0" + default_branch: description: - - Enable Service Desk. + - The default branch name for this project. + - For project creation, this option requires O(initialize_with_readme=true). + - For project update, the branch must exist. + - Supports project's default branch update since community.general 8.0.0. + type: str + version_added: "4.2.0" + description: + description: + - An description for the project. + type: str + environments_access_level: + description: + - V(private) means that deployment to environment is allowed only to project members. + - V(disabled) means that deployment to environment is disabled. + - V(enabled) means that deployment to environment is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + feature_flags_access_level: + description: + - V(private) means that feature rollout is allowed only to project members. + - V(disabled) means that feature rollout is disabled. + - V(enabled) means that feature rollout is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + forking_access_level: + description: + - V(private) means that repository forks is allowed only to project members. + - V(disabled) means that repository forks are disabled. + - V(enabled) means that repository forks are enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + group: + description: + - Id or the full path of the group of which this projects belongs to. + type: str + import_url: + description: + - Git repository which will be imported into gitlab. + - GitLab server needs read access to this git repository. 
+ required: false + type: str + infrastructure_access_level: + description: + - V(private) means that configuring infrastructure is allowed only to project members. + - V(disabled) means that configuring infrastructure is disabled. + - V(enabled) means that configuring infrastructure is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + initialize_with_readme: + description: + - Will initialize the project with a default C(README.md). + - Is only used when the project is created, and ignored otherwise. type: bool - version_added: "9.3.0" + default: false + version_added: "4.0.0" + issues_enabled: + description: + - Whether you want to create issues or not. + - Possible values are true and false. + type: bool + default: true + lfs_enabled: + description: + - Enable Git large file systems to manages large files such + as audio, video, and graphics files. + type: bool + required: false + default: false + version_added: "2.0.0" + merge_method: + description: + - What requirements are placed upon merges. + - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. + type: str + choices: ["ff", "merge", "rebase_merge"] + default: merge + version_added: "1.0.0" + merge_requests_enabled: + description: + - If merge requests can be made or not. + - Possible values are true and false. + type: bool + default: true model_registry_access_level: description: - V(private) means that accessing model registry tab is allowed only to project members. @@ -323,6 +198,131 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "9.3.0" + monitor_access_level: + description: + - V(private) means that monitoring health is allowed only to project members. + - V(disabled) means that monitoring health is disabled. + - V(enabled) means that monitoring health is enabled. 
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + name: + description: + - The name of the project. + required: true + type: str + only_allow_merge_if_all_discussions_are_resolved: + description: + - All discussions on a merge request (MR) have to be resolved. + type: bool + version_added: "3.4.0" + only_allow_merge_if_pipeline_succeeds: + description: + - Only allow merges if pipeline succeeded. + type: bool + version_added: "3.4.0" + packages_enabled: + description: + - Enable GitLab package repository. + type: bool + version_added: "3.4.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. + - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + path: + description: + - The path of the project you want to create, this will be server_url//path. + - If not supplied, name will be used. + type: str + releases_access_level: + description: + - V(private) means that accessing release is allowed only to project members. + - V(disabled) means that accessing release is disabled. + - V(enabled) means that accessing release is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + remove_source_branch_after_merge: + description: + - Remove the source branch after merge. + type: bool + version_added: "3.4.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + security_and_compliance_access_level: + description: + - V(private) means that accessing security and complicance tab is allowed only to project members. 
+ - V(disabled) means that accessing security and complicance tab is disabled. + - V(enabled) means that accessing security and complicance tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + service_desk_enabled: + description: + - Enable Service Desk. + type: bool + version_added: "9.3.0" + shared_runners_enabled: + description: + - Enable shared runners for this project. + type: bool + version_added: "3.7.0" + snippets_enabled: + description: + - If creating snippets should be available or not. + type: bool + default: true + squash_option: + description: + - Squash commits when merging. + type: str + choices: ["never", "always", "default_off", "default_on"] + version_added: "3.4.0" + state: + description: + - Create or delete project. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + topics: + description: + - A topic or list of topics to be assigned to a project. + - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)). + type: list + elements: str + version_added: "6.6.0" + username: + description: + - Used to create a personal project under a user's name. + type: str + version_added: "3.3.0" + visibility: + description: + - V(private) Project access must be granted explicitly for each user. + - V(internal) The project can be cloned by any logged in user. + - V(public) The project can be cloned without any authentication. + default: private + type: str + choices: ["private", "internal", "public"] + aliases: + - visibility_level + wiki_enabled: + description: + - If an wiki for this project should be available or not. 
+ type: bool + default: true ''' EXAMPLES = r''' @@ -422,37 +422,37 @@ class GitLabProject(object): def create_or_update_project(self, module, project_name, namespace, options): changed = False project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'builds_access_level': options['builds_access_level'], + 'ci_config_path': options['ci_config_path'], + 'container_expiration_policy': options['container_expiration_policy'], + 'container_registry_access_level': options['container_registry_access_level'], + 'description': options['description'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'forking_access_level': options['forking_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_enabled': options['issues_enabled'], + 'lfs_enabled': options['lfs_enabled'], + 'merge_method': options['merge_method'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'name': project_name, 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], 'packages_enabled': options['packages_enabled'], - 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 
'ci_config_path': options['ci_config_path'], - 'shared_runners_enabled': options['shared_runners_enabled'], - 'repository_access_level': options['repository_access_level'], - 'builds_access_level': options['builds_access_level'], - 'forking_access_level': options['forking_access_level'], - 'container_registry_access_level': options['container_registry_access_level'], - 'releases_access_level': options['releases_access_level'], - 'environments_access_level': options['environments_access_level'], - 'feature_flags_access_level': options['feature_flags_access_level'], - 'infrastructure_access_level': options['infrastructure_access_level'], - 'monitor_access_level': options['monitor_access_level'], - 'security_and_compliance_access_level': options['security_and_compliance_access_level'], - 'container_expiration_policy': options['container_expiration_policy'], 'pages_access_level': options['pages_access_level'], + 'releases_access_level': options['releases_access_level'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'repository_access_level': options['repository_access_level'], + 'security_and_compliance_access_level': options['security_and_compliance_access_level'], 'service_desk_enabled': options['service_desk_enabled'], - 'model_registry_access_level': options['model_registry_access_level'], + 'shared_runners_enabled': options['shared_runners_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'squash_option': options['squash_option'], + 'visibility': options['visibility'], + 'wiki_enabled': options['wiki_enabled'], } # topics was introduced on gitlab >=14 and replace tag_list. 
We get current gitlab version @@ -465,7 +465,7 @@ class GitLabProject(object): # Because we have already call userExists in main() if self.project_object is None: if options['default_branch'] and not options['initialize_with_readme']: - module.fail_json(msg="Param default_branch need param initialize_with_readme set to true") + module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true") project_options.update({ 'path': options['path'], 'import_url': options['import_url'], @@ -499,7 +499,7 @@ class GitLabProject(object): try: project.save() except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) + self._module.fail_json(msg="Failed to update project: %s " % e) return True return False @@ -583,42 +583,10 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - default_branch=dict(type='str'), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), allow_merge_on_skipped_pipeline=dict(type='bool'), - only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), - only_allow_merge_if_pipeline_succeeds=dict(type='bool'), - packages_enabled=dict(type='bool'), - remove_source_branch_after_merge=dict(type='bool'), - 
squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - ci_config_path=dict(type='str'), - shared_runners_enabled=dict(type='bool'), avatar_path=dict(type='path'), - repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - topics=dict(type='list', elements='str'), + ci_config_path=dict(type='str'), container_expiration_policy=dict(type='dict', default=None, options=dict( cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), enabled=dict(type='bool'), @@ -627,9 +595,41 @@ def main(): name_regex=dict(type='str'), name_regex_keep=dict(type='str'), )), - pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - service_desk_enabled=dict(type='bool'), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + default_branch=dict(type='str'), + description=dict(type='str'), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 
'enabled']), + group=dict(type='str'), + import_url=dict(type='str'), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + initialize_with_readme=dict(type='bool', default=False), + issues_enabled=dict(type='bool', default=True), + lfs_enabled=dict(default=False, type='bool'), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type='bool', default=True), model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + name=dict(type='str', required=True), + only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), + only_allow_merge_if_pipeline_succeeds=dict(type='bool'), + packages_enabled=dict(type='bool'), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + path=dict(type='str'), + releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + remove_source_branch_after_merge=dict(type='bool'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), + shared_runners_enabled=dict(type='bool'), + snippets_enabled=dict(default=True, type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + state=dict(type='str', default="present", choices=["absent", "present"]), + topics=dict(type='list', elements='str'), + username=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + wiki_enabled=dict(type='bool', default=True), )) module = AnsibleModule( @@ -654,46 +654,46 @@ def main(): # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) + 
allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + avatar_path = module.params['avatar_path'] + builds_access_level = module.params['builds_access_level'] + ci_config_path = module.params['ci_config_path'] + container_expiration_policy = module.params['container_expiration_policy'] + container_registry_access_level = module.params['container_registry_access_level'] + default_branch = module.params['default_branch'] + environments_access_level = module.params['environments_access_level'] + feature_flags_access_level = module.params['feature_flags_access_level'] + forking_access_level = module.params['forking_access_level'] group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] + import_url = module.params['import_url'] + infrastructure_access_level = module.params['infrastructure_access_level'] initialize_with_readme = module.params['initialize_with_readme'] issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = module.params['import_url'] - state = module.params['state'] lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] - allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + merge_method = module.params['merge_method'] + merge_requests_enabled = module.params['merge_requests_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] + monitor_access_level = module.params['monitor_access_level'] only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] only_allow_merge_if_pipeline_succeeds = 
module.params['only_allow_merge_if_pipeline_succeeds'] packages_enabled = module.params['packages_enabled'] - remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = module.params['ci_config_path'] - shared_runners_enabled = module.params['shared_runners_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - repository_access_level = module.params['repository_access_level'] - builds_access_level = module.params['builds_access_level'] - forking_access_level = module.params['forking_access_level'] - container_registry_access_level = module.params['container_registry_access_level'] - releases_access_level = module.params['releases_access_level'] - environments_access_level = module.params['environments_access_level'] - feature_flags_access_level = module.params['feature_flags_access_level'] - infrastructure_access_level = module.params['infrastructure_access_level'] - monitor_access_level = module.params['monitor_access_level'] - security_and_compliance_access_level = module.params['security_and_compliance_access_level'] - topics = module.params['topics'] - container_expiration_policy = module.params['container_expiration_policy'] pages_access_level = module.params['pages_access_level'] + project_description = module.params['description'] + project_name = module.params['name'] + project_path = module.params['path'] + releases_access_level = module.params['releases_access_level'] + remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] + repository_access_level = module.params['repository_access_level'] + security_and_compliance_access_level = module.params['security_and_compliance_access_level'] service_desk_enabled = module.params['service_desk_enabled'] - model_registry_access_level = module.params['model_registry_access_level'] + shared_runners_enabled = 
module.params['shared_runners_enabled'] + snippets_enabled = module.params['snippets_enabled'] + squash_option = module.params['squash_option'] + state = module.params['state'] + topics = module.params['topics'] + username = module.params['username'] + visibility = module.params['visibility'] + wiki_enabled = module.params['wiki_enabled'] # Set project_path to project_name if it is empty. if project_path is None: @@ -737,42 +737,42 @@ def main(): if state == 'present': if gitlab_project.create_or_update_project(module, project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "default_branch": default_branch, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": initialize_with_readme, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, "only_allow_merge_if_all_discussions_are_resolved": 
only_allow_merge_if_all_discussions_are_resolved, "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, "packages_enabled": packages_enabled, - "remove_source_branch_after_merge": remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, - "shared_runners_enabled": shared_runners_enabled, - "avatar_path": avatar_path, - "repository_access_level": repository_access_level, - "builds_access_level": builds_access_level, - "forking_access_level": forking_access_level, - "container_registry_access_level": container_registry_access_level, - "releases_access_level": releases_access_level, - "environments_access_level": environments_access_level, - "feature_flags_access_level": feature_flags_access_level, - "infrastructure_access_level": infrastructure_access_level, - "monitor_access_level": monitor_access_level, - "security_and_compliance_access_level": security_and_compliance_access_level, - "topics": topics, - "container_expiration_policy": container_expiration_policy, "pages_access_level": pages_access_level, + "path": project_path, + "releases_access_level": releases_access_level, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "repository_access_level": repository_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, "service_desk_enabled": service_desk_enabled, - "model_registry_access_level": model_registry_access_level, + "shared_runners_enabled": shared_runners_enabled, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, + "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) From 9571ec7c725184e8a557cbb0c8b3ae5158771e27 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 15 Aug 2024 21:40:21 +0200 Subject: [PATCH 201/482] Improve 
communication link description. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index efdb33f065..53354b93f9 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https: ## Communication * Join the Ansible forum: - * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions. * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. 
From 14e86bde07f92c2574a791cfb8b0dfa82e5dae97 Mon Sep 17 00:00:00 2001 From: Jyrki Gadinger Date: Sat, 17 Aug 2024 15:17:24 +0200 Subject: [PATCH 202/482] one_template: update name in copyright (#8770) finally got it changed this year :) --- plugins/modules/one_template.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py index 06460fee57..1fcc81c540 100644 --- a/plugins/modules/one_template.py +++ b/plugins/modules/one_template.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # -# Copyright (c) 2021, Georg Gadinger +# Copyright (c) 2021, Jyrki Gadinger # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -60,7 +60,7 @@ extends_documentation_fragment: - community.general.attributes author: - - "Georg Gadinger (@nilsding)" + - "Jyrki Gadinger (@nilsding)" ''' EXAMPLES = ''' From c84fb5577b6f39a0c8c03e43daaf143d359bbbcd Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 18 Aug 2024 01:20:00 +1200 Subject: [PATCH 203/482] MH deco: minor refactor (#8766) * MH deco: minor refactor * adjustments and improvement in test * sanity fix * use func.__self__ * simplify use of self * add changelog frag --- changelogs/fragments/8766-mh-deco-improve.yml | 3 ++ plugins/module_utils/mh/deco.py | 25 +++++----- .../module_utils/test_module_helper.py | 50 ++++++++++--------- 3 files changed, 41 insertions(+), 37 deletions(-) create mode 100644 changelogs/fragments/8766-mh-deco-improve.yml diff --git a/changelogs/fragments/8766-mh-deco-improve.yml b/changelogs/fragments/8766-mh-deco-improve.yml new file mode 100644 index 0000000000..7bf104d2cc --- /dev/null +++ b/changelogs/fragments/8766-mh-deco-improve.yml @@ -0,0 +1,3 @@ +minor_changes: + - MH module utils - add parameter ``when`` to ``cause_changes`` decorator 
(https://github.com/ansible-collections/community.general/pull/8766). + - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index 5138b212c7..ecfebfd769 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -13,23 +13,26 @@ from functools import wraps from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -def cause_changes(on_success=None, on_failure=None): +def cause_changes(on_success=None, on_failure=None, when=None): def deco(func): - if on_success is None and on_failure is None: - return func - @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(self, *args, **kwargs): try: - self = args[0] - func(*args, **kwargs) + func(self, *args, **kwargs) if on_success is not None: self.changed = on_success + elif when == "success": + self.changed = True except Exception: if on_failure is not None: self.changed = on_failure + elif when == "failure": + self.changed = True raise + finally: + if when == "always": + self.changed = True return wrapper @@ -50,8 +53,6 @@ def module_fails_on_exception(func): try: func(self, *args, **kwargs) - except SystemExit: - raise except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) @@ -73,6 +74,7 @@ def check_mode_skip(func): def wrapper(self, *args, **kwargs): if not self.module.check_mode: return func(self, *args, **kwargs) + return wrapper @@ -87,7 +89,7 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_callable - if value is not None: + else: @wraps(func) def wrapper_value(self, *args, **kwargs): if self.module.check_mode: @@ -95,7 +97,4 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_value - if callable is None and value is None: - return 
check_mode_skip - return deco diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index b2cd58690d..d329765051 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -119,28 +119,22 @@ def test_variable_meta_change(): assert vd.has_changed('d') -class MockMH(object): - changed = None - - def _div(self, x, y): - return x / y - - func_none = cause_changes()(_div) - func_onsucc = cause_changes(on_success=True)(_div) - func_onfail = cause_changes(on_failure=True)(_div) - func_onboth = cause_changes(on_success=True, on_failure=True)(_div) - - -CAUSE_CHG_DECO_PARAMS = ['method', 'expect_exception', 'expect_changed'] +CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed'] CAUSE_CHG_DECO = dict( - none_succ=dict(method='func_none', expect_exception=False, expect_changed=None), - none_fail=dict(method='func_none', expect_exception=True, expect_changed=None), - onsucc_succ=dict(method='func_onsucc', expect_exception=False, expect_changed=True), - onsucc_fail=dict(method='func_onsucc', expect_exception=True, expect_changed=None), - onfail_succ=dict(method='func_onfail', expect_exception=False, expect_changed=None), - onfail_fail=dict(method='func_onfail', expect_exception=True, expect_changed=True), - onboth_succ=dict(method='func_onboth', expect_exception=False, expect_changed=True), - onboth_fail=dict(method='func_onboth', expect_exception=True, expect_changed=True), + none_succ=dict(deco_args={}, expect_exception=False, expect_changed=None), + none_fail=dict(deco_args={}, expect_exception=True, expect_changed=None), + onsucc_succ=dict(deco_args=dict(on_success=True), expect_exception=False, expect_changed=True), + onsucc_fail=dict(deco_args=dict(on_success=True), expect_exception=True, expect_changed=None), + onfail_succ=dict(deco_args=dict(on_failure=True), expect_exception=False, expect_changed=None), + 
onfail_fail=dict(deco_args=dict(on_failure=True), expect_exception=True, expect_changed=True), + onboth_succ=dict(deco_args=dict(on_success=True, on_failure=True), expect_exception=False, expect_changed=True), + onboth_fail=dict(deco_args=dict(on_success=True, on_failure=True), expect_exception=True, expect_changed=True), + whensucc_succ=dict(deco_args=dict(when="success"), expect_exception=False, expect_changed=True), + whensucc_fail=dict(deco_args=dict(when="success"), expect_exception=True, expect_changed=None), + whenfail_succ=dict(deco_args=dict(when="failure"), expect_exception=False, expect_changed=None), + whenfail_fail=dict(deco_args=dict(when="failure"), expect_exception=True, expect_changed=True), + whenalways_succ=dict(deco_args=dict(when="always"), expect_exception=False, expect_changed=True), + whenalways_fail=dict(deco_args=dict(when="always"), expect_exception=True, expect_changed=True), ) CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) @@ -150,12 +144,20 @@ CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) for param in CAUSE_CHG_DECO_PARAMS] for tc in CAUSE_CHG_DECO_IDS], ids=CAUSE_CHG_DECO_IDS) -def test_cause_changes_deco(method, expect_exception, expect_changed): +def test_cause_changes_deco(deco_args, expect_exception, expect_changed): + + class MockMH(object): + changed = None + + @cause_changes(**deco_args) + def div_(self, x, y): + return x / y + mh = MockMH() if expect_exception: with pytest.raises(Exception): - getattr(mh, method)(1, 0) + mh.div_(1, 0) else: - getattr(mh, method)(9, 3) + mh.div_(9, 3) assert mh.changed == expect_changed From b79ac4f0ac3225da70fa52d2c6df2734457813e4 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Tue, 20 Aug 2024 08:56:27 +0200 Subject: [PATCH 204/482] keycloak_user_federation: fix key error when removing mappers in update (#8762) * remove new mappers without an id from list comprehension * add changelog fragment * Update 
changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...762-keycloac_user_federation-fix-key-error-when-updating.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml diff --git a/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml b/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml new file mode 100644 index 0000000000..08da8ae21a --- /dev/null +++ b/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - fix key error when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 00f407ec03..e327d4ac20 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -1004,7 +1004,7 @@ def main(): for before_mapper in before_comp.get('mappers', []): # remove unwanted existing mappers that will not be updated - if not before_mapper['id'] in [x['id'] for x in desired_mappers]: + if not before_mapper['id'] in [x['id'] for x in desired_mappers if 'id' in x]: kc.delete_component(before_mapper['id'], realm) for mapper in desired_mappers: From e5bc38d856b3db0c436ccf63538bd8def9594dca Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 20 Aug 2024 18:57:29 +1200 Subject: [PATCH 205/482] MH: use mute_vardict_deprecation (#8776) * use mute_vardict_deprecation * add changelog frag --- changelogs/fragments/8776-mute-vardict-deprecation.yml | 3 +++ 
plugins/modules/gio_mime.py | 1 + plugins/modules/jira.py | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8776-mute-vardict-deprecation.yml diff --git a/changelogs/fragments/8776-mute-vardict-deprecation.yml b/changelogs/fragments/8776-mute-vardict-deprecation.yml new file mode 100644 index 0000000000..a74e40e923 --- /dev/null +++ b/changelogs/fragments/8776-mute-vardict-deprecation.yml @@ -0,0 +1,3 @@ +minor_changes: + - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). + - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 27f90581ef..82c583c76f 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -84,6 +84,7 @@ class GioMime(ModuleHelper): ), supports_check_mode=True, ) + mute_vardict_deprecation = True def __init_module__(self): self.runner = gio_mime_runner(self.module, check_rc=True) diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index c36cf99375..db3eca06bb 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -531,7 +531,7 @@ class JIRA(StateModuleHelper): ), supports_check_mode=False ) - + mute_vardict_deprecation = True state_param = 'operation' def __init_module__(self): From e3a3c6d58f4034d53dd4dc6aa26e38aef31a6fc8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 22 Aug 2024 03:52:30 +1200 Subject: [PATCH 206/482] ModuleHelper guide (#8771) * initial commit * fix initial version * add quickstart and high-level outline * MH guide progress * MH guide progress (up to params,vars,output) * adjustments * MH guide progress (up to handling changes) * MH guide progress (up to Exceptions) * typo * change section from note to important * MH guide progress (added StateModuleHelper) * minor improvement * MH 
guide progress (added decorators) * typo * minor adjustments * remove line * complete MH guide * adjustments * adjustments * change paragraph into seealso * rearrange sections, plus wordsmithing * adjustments * wordsmithing * fix references --- .github/BOTMETA.yml | 2 + docs/docsite/extra-docs.yml | 1 + docs/docsite/rst/guide_modulehelper.rst | 540 ++++++++++++++++++++++++ 3 files changed, 543 insertions(+) create mode 100644 docs/docsite/rst/guide_modulehelper.rst diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f73da1e874..fe0c1a62b6 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1485,6 +1485,8 @@ files: maintainers: russoz docs/docsite/rst/guide_deps.rst: maintainers: russoz + docs/docsite/rst/guide_modulehelper.rst: + maintainers: russoz docs/docsite/rst/guide_online.rst: maintainers: remyleone docs/docsite/rst/guide_packet.rst: diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index aebe82f42e..f73d0fe012 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -19,3 +19,4 @@ sections: - guide_deps - guide_vardict - guide_cmdrunner + - guide_modulehelper diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst new file mode 100644 index 0000000000..68b46e6c94 --- /dev/null +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -0,0 +1,540 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_modulehelper: + +Module Helper guide +=================== + + +Introduction +^^^^^^^^^^^^ + +Writing a module for Ansible is largely described in existing documentation. +However, a good part of that is boilerplate code that needs to be repeated every single time. +That is where ``ModuleHelper`` comes to assistance: a lot of that boilerplate code is done. + +.. 
_ansible_collections.community.general.docsite.guide_modulehelper.quickstart: + +Quickstart +"""""""""" + +See the `example from Ansible documentation `_ +written with ``ModuleHelper``. +But bear in mind that it does not showcase all of MH's features: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + new=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + use_old_vardict = False + + def __run__(self): + self.vars.original_message = '' + self.vars.message = '' + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + + +Module Helper +^^^^^^^^^^^^^ + +Introduction +"""""""""""" + +``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences. +The basic structure of a module using ``ModuleHelper`` is as shown in the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +section above, but there are more elements that will take part in it. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + class MyTest(ModuleHelper): + output_params = () + change_params = () + diff_params = () + facts_name = None + facts_params = () + use_old_vardict = True + mute_vardict_deprecation = False + module = dict( + argument_spec=dict(...), + # ... + ) + +After importing the ``ModuleHelper`` class, you need to declare your own class extending it. + +.. 
seealso:: + + There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH. + See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details. + +The easiest way of specifying the module is to create the class variable ``module`` with a dictionary +containing the exact arguments that would be passed as parameters to ``AnsibleModule``. +If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable. +MH also accepts a parameter ``module`` in its constructor, if that parameter is used used, +then it will override the class variable. The parameter can either be ``dict`` or ``AnsibleModule`` as well. + +Beyond the definition of the module, there are other variables that can be used to control aspects +of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are +explained through this document. + +The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like: + +.. code-block:: python + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) + +The method ``ModuleHelper.__run__()`` must be implemented by the module and most +modules will be able to perform their actions implementing only that MH method. +However, in some cases, you might want to execute actions before or after the main tasks, in which cases +you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively. + +Note that the output comes from ``self.output``, which is a ``@property`` method. +By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values. 
+Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*. +Also note the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked +to track changes in their content. + +.. seealso:: + + More details in sections + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +.. seealso:: + + See more about the decorator + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below. + + +Another way to write the example from the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +would be: + +.. code-block:: python + + def __init_module__(self): + self.vars.original_message = '' + self.vars.message = '' + + def __run__(self): + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + + def __quit_module__(self): + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + +Notice that there are no calls to ``module.exit_json()`` nor ``module.fail_json()``: if the module fails, raise an exception. +You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that. +If no exception is raised, then the module succeeds. + +.. seealso:: + + See more about exceptions in section + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below. + +Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like: + +.. code-block:: python + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + +The class method ``execute()`` is nothing more than a convenience shorcut for: + +.. 
code-block:: python + + m = MyTest() + m.run() + +Optionally, an ``AnsibleModule`` may be passed as parameter to ``execute()``. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput: + +Parameters, variables, and output +""""""""""""""""""""""""""""""""" + +All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type. +By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module. +As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them. +One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values. + +.. important:: + + The ``VarDict`` feature described was introduced in community.general 7.1.0, but there was a first + implementation of it embedded within ``ModuleHelper``. + That older implementation is now deprecated and will be removed in community.general 11.0.0. + After community.general 7.1.0, MH modules generate a deprecation message about *using the old VarDict*. + There are two ways to prevent that from happening: + + #. Set ``mute_vardict_deprecation = True`` and the deprecation will be silenced. If the module still uses the old ``VarDict``, + it will not be able to update to community.general 11.0.0 (Spring 2026) upon its release. + #. Set ``use_old_vardict = False`` to make the MH module use the new ``VarDict`` immediatelly. + The new ``VarDict`` and its use is documented and this is the recommended way to handle this. + + .. code-block:: python + + class MyTest(ModuleHelper): + use_old_vardict = False + mute_vardict_deprecation = True + ... + + These two settings are mutually exclusive, but that is not enforced and the behavior when setting both is not specified. 
+ +Contrary to new variables created in ``VarDict``, module parameters are not set for output by default. +If you want to include some module parameters in the output, list them in the ``output_params`` class variable. + +.. code-block:: python + + class MyTest(ModuleHelper): + output_params = ('state', 'name') + ... + +Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``. +Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable. + +.. code-block:: python + + class MyTest(ModuleHelper): + # example from community.general.xfconf + change_params = ('value', ) + ... + +.. seealso:: + + See more about this in + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters. +With that, MH will automatically generate the diff output for variables that have changed. + +.. code-block:: python + + class MyTest(ModuleHelper): + diff_params = ('value', ) + + def __run__(self): + # example from community.general.gio_mime + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + +Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters. +Additionally, you must specify ``facts_name``, as in: + +.. code-block:: python + + class VolumeFacts(ModuleHelper): + facts_name = 'volume_facts' + + def __init_module__(self): + self.vars.set("volume", 123, fact=True) + +That generates an Ansible fact like: + +.. code-block:: yaml+jinja + + - name: Obtain volume facts + some.collection.volume_facts: + # parameters + + - name: Print volume facts + debug: + msg: Volume fact is {{ ansible_facts.volume_facts.volume }} + +.. 
important:: + + If ``facts_name`` is not set, the module does not generate any facts. + + +.. _ansible_collections.community.general.docsite.guide_modulehelper.changes: + +Handling changes +"""""""""""""""" + +In MH there are many ways to indicate change in the module execution. Here they are: + +Tracking changes in variables +----------------------------- + +As explained above, you can enable change tracking in any number of variables in ``self.vars``. +By the end of the module execution, if any of those variables has a value different then the first value assigned to them, +then that will be picked up by MH and signalled as changed at the module output. +See the example below to learn how you can enabled change tracking in variables: + +.. code-block:: python + + # using __init_module__() as example, it works the same in __run__() and __quit_module__() + def __init_module__(self): + # example from community.general.ansible_galaxy_install + self.vars.set("new_roles", {}, change=True) + + # example of "hidden" variable used only to track change in a value from community.general.gconftool2 + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + + # enable change-tracking without assigning value + self.vars.set_meta("new_roles", change=True) + + # if you must forcibly set an initial value to the variable + self.vars.set_meta("new_roles", initial_value=[]) + ... + +If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``. + +Indicating changes with ``changed`` +----------------------------------- + +If you want to indicate change directly in the code, then use the ``self.changed`` property in MH. +Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*. +By default, that hidden field is set to ``False``. 
+ +Effective change +---------------- + +The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation +between ``self.changed`` and the change calculated from ``self.vars``. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.exceptions: + +Exceptions +"""""""""" + +In MH, instead of calling ``module.fail_json()`` you can just raise an exception. +The output variables are collected the same way they would be for a successful execution. +However, you can set output variables specifically for that exception, if you so choose. + +.. code-block:: python + + def __init_module__(self): + if not complex_validation(): + self.do_raise("Validation failed!") + + # Or passing output variables + awesomeness = calculate_awesomeness() + if awesomeness > 1000: + self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness}) + +All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call. +However, if you do want to call ``self.module.fail_json()`` yourself it will work, +just keep in mind that there will be no automatic handling of output variables in that case. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh: + +StateModuleHelper +^^^^^^^^^^^^^^^^^ + +Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as +``state=present`` or ``state=absent`` for installing or removing packages. +By using ``StateModuleHelper`` you can make your code like the excerpt from the ``gconftool2`` below: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + class GConftool(StateModuleHelper): + ... + module = dict( + ... + ) + use_old_vardict = False + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + ... 
+ + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + ... + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + +Note that the method ``__run__()`` is implemented in ``StateModuleHelper``, all you need to implement are the methods ``state_``. +In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus, ``state_present()`` and ``state_absent()``. + +If the controlling parameter is not called ``state``, like in :ansplugin:`community.general.jira#module` module, just let SMH know about it: + +.. code-block:: python + + class JIRA(StateModuleHelper): + state_param = 'operation' + + def operation_create(self): + ... + + def operation_search(self): + ... + +Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue`` +is not implemented, SMH will resort to call a method called ``__state_fallback__()``. +By default, this method will raise a ``ValueError`` indicating the method was not found. +Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`: + +.. 
code-block:: python + + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + if self.vars.ubuntu_mode: + self.apply_change_ubuntu(self.vars.state, self.vars.name) + else: + self.apply_change(self.vars.state, self.vars.name) + +That module has only the states ``present`` and ``absent`` and the code for both is the one in the fallback method. + +.. note:: + + The name of the fallback method **does not change** if you set a different value of ``state_param``. + + +Other Conveniences +^^^^^^^^^^^^^^^^^^ + +Delegations to AnsibleModule +"""""""""""""""""""""""""""" + +The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``: + +- ``check_mode`` +- ``get_bin_path()`` +- ``warn()`` +- ``deprecate()`` + +Additionally, MH will also delegate: + +- ``diff_mode`` to ``self.module._diff`` +- ``verbosity`` to ``self.module._verbosity`` + +Decorators +"""""""""" + +The following decorators should only be used within ``ModuleHelper`` class. + +@cause_changes +-------------- + +This decorator will control whether the outcome of the method will cause the module to signal change in its output. +If the method completes without raising an exception it is considered to have succeeded, otherwise, it will have failed. + +The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``. +There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them. +The value of ``changed`` in the module output will be set to ``True``: + +- ``when="success"`` and the method completes without raising an exception. +- ``when="failure"`` and the method raises an exception. +- ``when="always"``, regardless of the method raising an exception or not. + +.. 
code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes + + # adapted excerpt from the community.general.jira module + class JIRA(StateModuleHelper): + @cause_changes(when="success") + def operation_create(self): + ... + +If ``when`` has a different value or no parameters are specificied, the decorator will have no effect whatsoever. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco: + +@module_fails_on_exception +-------------------------- + +In a method using this decorator, if an exception is raised, the text message of that exception will be captured +by the decorator and used to call ``self.module.fail_json()``. +In most of the cases there will be no need to use this decorator, because ``ModuleHelper.run()`` already uses it. + +@check_mode_skip +---------------- + +If the module is running in check mode, this decorator will prevent the method from executing. +The return value in that case is ``None``. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip + + # adapted excerpt from the community.general.locale_gen module + class LocaleGen(StateModuleHelper): + @check_mode_skip + def __state_fallback__(self): + ... + + +@check_mode_skip_returns +------------------------ + +This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode. +It is used with one of two parameters. One is ``callable`` and the return value in check mode will be ``callable(self, *args, **kwargs)``, +where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` will contain all the parameters passed to the method. + +The other option is to use the parameter ``value``, in which case the method will return ``value`` when in check mode. 
+ + +References +^^^^^^^^^^ + +- `Ansible Developer Guide `_ +- `Creating a module `_ +- `Returning ansible facts `_ +- :ref:`ansible_collections.community.general.docsite.guide_vardict` + + +.. versionadded:: 3.1.0 From 3607e3d012b07f7f1f63e71db99486e70e660d09 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 24 Aug 2024 17:14:13 +1200 Subject: [PATCH 207/482] pipx: add doc example (#8792) --- plugins/modules/pipx.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 372d4bec01..7f4954850f 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -163,6 +163,17 @@ EXAMPLES = ''' community.general.pipx: name: pycowsay state: absent + +- name: Install multiple packages from list + vars: + pipx_packages: + - pycowsay + - black + - tox + community.general.pipx: + name: "{{ item }}" + state: latest + with_items: "{{ pipx_packages }}" ''' From 5192ffe5b3e60290989b97f6d23661b4101d4622 Mon Sep 17 00:00:00 2001 From: Julien Lecomte Date: Sat, 24 Aug 2024 18:58:08 +0200 Subject: [PATCH 208/482] gitlab_project: add param "issues_access_level" (#8760) gitlab_project: add option `issues_access_level` to enable/disable project --- ...0-gitlab_project-add-issues-access-level.yml | 2 ++ plugins/modules/gitlab_project.py | 17 +++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml diff --git a/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml b/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml new file mode 100644 index 0000000000..1a77b2f0d4 --- /dev/null +++ b/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml @@ -0,0 +1,2 @@ +minor_changes: + - gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760). 
diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index c98e442cb0..7b53f8639c 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -162,10 +162,19 @@ options: type: bool default: false version_added: "4.0.0" + issues_access_level: + description: + - V(private) means that accessing issues tab is allowed only to project members. + - V(disabled) means that accessing issues tab is disabled. + - V(enabled) means that accessing issues tab is enabled. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.4.0" issues_enabled: description: - Whether you want to create issues or not. - - Possible values are true and false. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. type: bool default: true lfs_enabled: @@ -187,7 +196,6 @@ options: merge_requests_enabled: description: - If merge requests can be made or not. - - Possible values are true and false. 
type: bool default: true model_registry_access_level: @@ -432,6 +440,7 @@ class GitLabProject(object): 'feature_flags_access_level': options['feature_flags_access_level'], 'forking_access_level': options['forking_access_level'], 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_access_level': options['issues_access_level'], 'issues_enabled': options['issues_enabled'], 'lfs_enabled': options['lfs_enabled'], 'merge_method': options['merge_method'], @@ -605,6 +614,7 @@ def main(): import_url=dict(type='str'), infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), initialize_with_readme=dict(type='bool', default=False), + issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), issues_enabled=dict(type='bool', default=True), lfs_enabled=dict(default=False, type='bool'), merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), @@ -641,6 +651,7 @@ def main(): ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], ['group', 'username'], + ['issues_access_level', 'issues_enabled'], ], required_together=[ ['api_username', 'api_password'], @@ -668,6 +679,7 @@ def main(): import_url = module.params['import_url'] infrastructure_access_level = module.params['infrastructure_access_level'] initialize_with_readme = module.params['initialize_with_readme'] + issues_access_level = module.params['issues_access_level'] issues_enabled = module.params['issues_enabled'] lfs_enabled = module.params['lfs_enabled'] merge_method = module.params['merge_method'] @@ -751,6 +763,7 @@ def main(): "import_url": import_url, "infrastructure_access_level": infrastructure_access_level, "initialize_with_readme": initialize_with_readme, + "issues_access_level": issues_access_level, "issues_enabled": issues_enabled, "lfs_enabled": lfs_enabled, "merge_method": merge_method, From 7dc4429c9c56c6c281dc7216fa02abf77025078e Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Sat, 24 
Aug 2024 18:59:34 +0200 Subject: [PATCH 209/482] keycloak_user_federation: add module arg to make mapper removal optout (#8764) * add module arg to make mapper removal optout * change parameter name to snake case: remove_unspecified_mappers * add period to parameter description Co-authored-by: Felix Fontein * use dict indexing to get parameter instead of `.get()` * add changelog fragment * Update changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml Co-authored-by: Felix Fontein * add `version_added` to argument description Co-authored-by: Felix Fontein * Update plugins/modules/keycloak_user_federation.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ..._federation-make-mapper-removal-optout.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 27 ++++++++++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml diff --git a/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml b/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml new file mode 100644 index 0000000000..c457012751 --- /dev/null +++ b/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_user_federation - add module argument allowing users to optout of the removal of unspecified mappers, for example to keep the keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index e327d4ac20..22cd36a64f 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -85,6 +85,14 @@ options: - parentId type: str + remove_unspecified_mappers: + description: + - Remove mappers that are not specified in the configuration for this federation. + - Set to V(false) to keep mappers that are not listed in O(mappers). + type: bool + default: true + version_added: 9.4.0 + config: description: - Dict specifying the configuration options for the provider; the contents differ depending on @@ -808,6 +816,7 @@ def main(): provider_id=dict(type='str', aliases=['providerId']), provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), parent_id=dict(type='str', aliases=['parentId']), + remove_unspecified_mappers=dict(type='bool', default=True), mappers=dict(type='list', elements='dict', options=mapper_spec), ) @@ -849,7 +858,7 @@ def main(): # Filter and map the parameters names that apply comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers', 'remove_unspecified_mappers'] and module.params.get(x) is not None] # See if it already exists in Keycloak @@ -910,6 +919,11 @@ def main(): changeset['mappers'] = list() changeset['mappers'].append(new_mapper) + # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present + if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: + changeset_mapper_ids = [mapper['id'] for mapper in changeset['mappers'] if 'id' in mapper] + changeset['mappers'].extend([mapper for mapper in before_comp['mappers'] if mapper['id'] not in changeset_mapper_ids]) + # Prepare the desired 
values using the existing values (non-existence results in a dict that is save to use as a basis) desired_comp = before_comp.copy() desired_comp.update(changeset) @@ -965,11 +979,12 @@ def main(): new_mapper['parentId'] = cid updated_mappers.append(kc.create_component(new_mapper, realm)) - # we remove all unwanted default mappers - # we use ids so we dont accidently remove one of the previously updated default mapper - for default_mapper in default_mappers: - if not default_mapper['id'] in [x['id'] for x in updated_mappers]: - kc.delete_component(default_mapper['id'], realm) + if module.params['remove_unspecified_mappers']: + # we remove all unwanted default mappers + # we use ids so we dont accidently remove one of the previously updated default mapper + for default_mapper in default_mappers: + if not default_mapper['id'] in [x['id'] for x in updated_mappers]: + kc.delete_component(default_mapper['id'], realm) after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) if module._diff: From 4598758419a1803f24664ee79b45eb0167d284a0 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Aug 2024 02:58:03 +1200 Subject: [PATCH 210/482] MH cause_changes: deprecate params (#8791) * MH cause_changes: deprecate params * add changelog frag * Update changelogs/fragments/8791-mh-cause-changes-param-depr.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../8791-mh-cause-changes-param-depr.yml | 4 ++++ plugins/module_utils/mh/deco.py | 1 + plugins/modules/jira.py | 16 ++++++++-------- .../plugins/module_utils/test_module_helper.py | 5 +++++ 4 files changed, 18 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8791-mh-cause-changes-param-depr.yml diff --git a/changelogs/fragments/8791-mh-cause-changes-param-depr.yml b/changelogs/fragments/8791-mh-cause-changes-param-depr.yml new file mode 100644 index 0000000000..7f7935af14 --- /dev/null +++ 
b/changelogs/fragments/8791-mh-cause-changes-param-depr.yml @@ -0,0 +1,4 @@ +minor_changes: + - jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791). +deprecated_features: + - MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index ecfebfd769..c7b63b7050 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -14,6 +14,7 @@ from ansible_collections.community.general.plugins.module_utils.mh.exceptions im def cause_changes(on_success=None, on_failure=None, when=None): + # Parameters on_success and on_failure are deprecated and should be removed in community.general 12.0.0 def deco(func): @wraps(func) diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index db3eca06bb..0bb95158f7 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -544,7 +544,7 @@ class JIRA(StateModuleHelper): self.vars.uri = self.vars.uri.strip('/') self.vars.set('restbase', self.vars.uri + '/rest/api/2') - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_create(self): createfields = { 'project': {'key': self.vars.project}, @@ -562,7 +562,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_comment(self): data = { 'body': self.vars.comment @@ -578,7 +578,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_worklog(self): data = { 'comment': self.vars.comment @@ -594,7 +594,7 @@ class JIRA(StateModuleHelper): url = 
self.vars.restbase + '/issue/' + self.vars.issue + '/worklog' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_edit(self): data = { 'fields': self.vars.fields @@ -602,7 +602,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue self.vars.meta = self.put(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_update(self): data = { "update": self.vars.fields, @@ -624,7 +624,7 @@ class JIRA(StateModuleHelper): self.vars.meta = self.get(url) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_transition(self): # Find the transition id turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" @@ -657,7 +657,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_link(self): data = { 'type': {'name': self.vars.linktype}, @@ -667,7 +667,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issueLink/' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_attach(self): v = self.vars filename = v.attachment.get('filename') diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index d329765051..b1e2eafc7f 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -119,6 +119,11 @@ def test_variable_meta_change(): assert vd.has_changed('d') +# +# DEPRECATION NOTICE +# Parameters on_success and on_failure are deprecated and will be removed in community.genral 12.0.0 +# Remove testcases with those params when releasing 12.0.0 +# CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed'] 
CAUSE_CHG_DECO = dict( none_succ=dict(deco_args={}, expect_exception=False, expect_changed=None), From 573a7b97c699c46e8bb37d7b4ea52d711f1b4c07 Mon Sep 17 00:00:00 2001 From: Veikko Virrankoski <71337077+vvirrank@users.noreply.github.com> Date: Sun, 25 Aug 2024 18:01:05 +0300 Subject: [PATCH 211/482] Fix gitlab_project container_expiration_policy for project create (#8790) * Fix gitlab_project container_expiration_policy for project create * Check for container_expiration_policy presence before renaming it * Add missing links to changelog fragment * Fix changelog grammar --- ...-gitlab_project-fix-cleanup-policy-on-project-create.yml | 3 +++ plugins/modules/gitlab_project.py | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml diff --git a/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml b/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml new file mode 100644 index 0000000000..ba171a1178 --- /dev/null +++ b/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml @@ -0,0 +1,3 @@ +bugfixes: + - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790). + - gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790). 
diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index 7b53f8639c..c5d2278ba0 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -521,6 +521,8 @@ class GitLabProject(object): return True arguments['namespace_id'] = namespace.id + if 'container_expiration_policy' in arguments: + arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy'] try: project = self._gitlab.projects.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: @@ -548,9 +550,9 @@ class GitLabProject(object): for arg_key, arg_value in arguments.items(): if arguments[arg_key] is not None: - if getattr(project, arg_key) != arguments[arg_key]: + if getattr(project, arg_key, None) != arguments[arg_key]: if arg_key == 'container_expiration_policy': - old_val = getattr(project, arg_key) + old_val = getattr(project, arg_key, {}) final_val = {key: value for key, value in arg_value.items() if value is not None} if final_val.get('older_than') == '0d': From 9c9c4cbc3ef9ef2e1a6c9312e4ed97ffcbab276e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Aug 2024 20:21:09 +1200 Subject: [PATCH 212/482] pipx/pipx_info: add parameter `global` (#8793) * pipx/pipx_info: add new parameters * add test for --global, refactor int test main file * ensure initial state of test * ensure PATH includes /usr/local/bin * ensure PATH includes /usr/local/bin for entire block * ensure minimum version of pip * ensure pipx 1.6.0 is installed * push recommendation for pipx 1.7.0 instead of 1.6.0 * add changelog frag * add deprecatons to changelog frag * add deprecatons to changelog frag, better * Update changelogs/fragments/8793-pipx-global.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/8793-pipx-global.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8793-pipx-global.yml | 12 ++ 
plugins/module_utils/pipx.py | 37 ++-- plugins/modules/pipx.py | 75 ++++---- plugins/modules/pipx_info.py | 30 ++- tests/integration/targets/pipx/tasks/main.yml | 171 +++--------------- .../targets/pipx/tasks/testcase-7497.yml | 27 +++ .../targets/pipx/tasks/testcase-8656.yml | 35 ++++ .../pipx/tasks/testcase-8793-global.yml | 58 ++++++ .../targets/pipx/tasks/testcase-injectpkg.yml | 49 +++++ .../targets/pipx/tasks/testcase-jupyter.yml | 28 +++ .../pipx/tasks/testcase-oldsitewide.yml | 40 ++++ 11 files changed, 355 insertions(+), 207 deletions(-) create mode 100644 changelogs/fragments/8793-pipx-global.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-7497.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-8656.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-8793-global.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-injectpkg.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-jupyter.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml diff --git a/changelogs/fragments/8793-pipx-global.yml b/changelogs/fragments/8793-pipx-global.yml new file mode 100644 index 0000000000..c3d7f5157f --- /dev/null +++ b/changelogs/fragments/8793-pipx-global.yml @@ -0,0 +1,12 @@ +minor_changes: + - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). + - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). +deprecated_features: + - > + pipx - + support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 + (https://github.com/ansible-collections/community.general/pull/8793). 
+ - > + pipx_info - + support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 + (https://github.com/ansible-collections/community.general/pull/8793). diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 3f493545d5..054de886a4 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -24,26 +24,29 @@ _state_map = dict( def pipx_runner(module, command, **kwargs): + arg_formats = dict( + state=fmt.as_map(_state_map), + name=fmt.as_list(), + name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=fmt.as_bool("--include-apps"), + install_deps=fmt.as_bool("--include-deps"), + inject_packages=fmt.as_list(), + force=fmt.as_bool("--force"), + include_injected=fmt.as_bool("--include-injected"), + index_url=fmt.as_opt_val('--index-url'), + python=fmt.as_opt_val('--python'), + system_site_packages=fmt.as_bool("--system-site-packages"), + _list=fmt.as_fixed(['list', '--include-injected', '--json']), + editable=fmt.as_bool("--editable"), + pip_args=fmt.as_opt_eq_val('--pip-args'), + suffix=fmt.as_opt_val('--suffix'), + ) + arg_formats["global"] = fmt.as_bool("--global") + runner = CmdRunner( module, command=command, - arg_formats=dict( - state=fmt.as_map(_state_map), - name=fmt.as_list(), - name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), - install_apps=fmt.as_bool("--include-apps"), - install_deps=fmt.as_bool("--include-deps"), - inject_packages=fmt.as_list(), - force=fmt.as_bool("--force"), - include_injected=fmt.as_bool("--include-injected"), - index_url=fmt.as_opt_val('--index-url'), - python=fmt.as_opt_val('--python'), - system_site_packages=fmt.as_bool("--system-site-packages"), - _list=fmt.as_fixed(['list', '--include-injected', '--json']), - editable=fmt.as_bool("--editable"), - pip_args=fmt.as_opt_eq_val('--pip-args'), - suffix=fmt.as_opt_val('--suffix'), - ), + 
arg_formats=arg_formats, environ_update={'USE_EMOJI': '0'}, check_rc=True, **kwargs diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 7f4954850f..1a73ae00bd 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -117,11 +117,19 @@ options: suffix: description: - Optional suffix for virtual environment and executable names. - - "B(Warning): C(pipx) documentation states this is an B(experimental) feature subject to change." + - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change." type: str version_added: 9.3.0 + global: + description: + - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. + - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. + Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version or newer. + type: bool + default: false + version_added: 9.4.0 notes: - - This module requires C(pipx) version 0.16.2.1 or above. + - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). - Please note that C(pipx) requires Python 3.6 or above. - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. 
@@ -191,26 +199,29 @@ def _make_name(name, suffix): class PipX(StateModuleHelper): output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + argument_spec = dict( + state=dict(type='str', default='install', + choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', + 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']), + name=dict(type='str'), + source=dict(type='str'), + install_apps=dict(type='bool', default=False), + install_deps=dict(type='bool', default=False), + inject_packages=dict(type='list', elements='str'), + force=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + index_url=dict(type='str'), + python=dict(type='str'), + system_site_packages=dict(type='bool', default=False), + executable=dict(type='path'), + editable=dict(type='bool', default=False), + pip_args=dict(type='str'), + suffix=dict(type='str'), + ) + argument_spec["global"] = dict(type='bool', default=False) + module = dict( - argument_spec=dict( - state=dict(type='str', default='install', - choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', - 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']), - name=dict(type='str'), - source=dict(type='str'), - install_apps=dict(type='bool', default=False), - install_deps=dict(type='bool', default=False), - inject_packages=dict(type='list', elements='str'), - force=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - index_url=dict(type='str'), - python=dict(type='str'), - system_site_packages=dict(type='bool', default=False), - executable=dict(type='path'), - editable=dict(type='bool', default=False), - pip_args=dict(type='str'), - suffix=dict(type='str'), - ), + argument_spec=argument_spec, required_if=[ ('state', 'present', ['name']), ('state', 'install', ['name']), @@ -279,8 +290,8 @@ class PipX(StateModuleHelper): def state_install(self): if not self.vars.application or 
self.vars.force: self.changed = True - args = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' - with self.runner(args, check_mode_skip=True) as ctx: + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) @@ -293,14 +304,14 @@ class PipX(StateModuleHelper): if self.vars.force: self.changed = True - with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) def state_uninstall(self): if self.vars.application: name = _make_name(self.vars.name, self.vars.suffix) - with self.runner('state name', check_mode_skip=True) as ctx: + with self.runner('state global name', check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) @@ -311,7 +322,7 @@ class PipX(StateModuleHelper): if not self.vars.application: self.do_raise("Trying to reinstall a non-existent application: {0}".format(name)) self.changed = True - with self.runner('state name python', check_mode_skip=True) as ctx: + with self.runner('state global name python', check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) @@ -321,32 +332,32 @@ class PipX(StateModuleHelper): self.do_raise("Trying to inject packages into a non-existent application: {0}".format(name)) if self.vars.force: self.changed = True - with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + with self.runner('state global index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: ctx.run(name=name) 
self._capture_results(ctx) def state_uninstall_all(self): - with self.runner('state', check_mode_skip=True) as ctx: + with self.runner('state global', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_reinstall_all(self): - with self.runner('state python', check_mode_skip=True) as ctx: + with self.runner('state global python', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_upgrade_all(self): if self.vars.force: self.changed = True - with self.runner('state include_injected force', check_mode_skip=True) as ctx: + with self.runner('state global include_injected force', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True - args = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' - with self.runner(args, check_mode_skip=True) as ctx: + args_order = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 992ca79419..dee3125da2 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -47,14 +47,22 @@ options: If not specified, the module will use C(python -m pipx) to run the tool, using the same Python interpreter as ansible itself. type: path + global: + description: + - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. + - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. + Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version or newer. 
+ type: bool + default: false + version_added: 9.3.0 notes: + - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). + - Please note that C(pipx) requires Python 3.6 or above. - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - > This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using the R(environment Ansible keyword, playbooks_environment). - - This module requires C(pipx) version 0.16.2.1 or above. - - Please note that C(pipx) requires Python 3.6 or above. - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). author: - "Alexei Znamensky (@russoz)" @@ -140,14 +148,16 @@ from ansible.module_utils.facts.compat import ansible_facts class PipXInfo(ModuleHelper): output_params = ['name'] + argument_spec = dict( + name=dict(type='str'), + include_deps=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + include_raw=dict(type='bool', default=False), + executable=dict(type='path'), + ) + argument_spec["global"] = dict(type='bool', default=False) module = dict( - argument_spec=dict( - name=dict(type='str'), - include_deps=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - include_raw=dict(type='bool', default=False), - executable=dict(type='path'), - ), + argument_spec=argument_spec, supports_check_mode=True, ) use_old_vardict = False @@ -195,7 +205,7 @@ class PipXInfo(ModuleHelper): return results - with self.runner('_list', output_process=process_list) as ctx: + with self.runner('_list global', output_process=process_list) as ctx: self.vars.application = ctx.run(_list=1) self._capture_results(ctx) diff --git 
a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index aee8948b90..f1a993aa56 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -217,158 +217,33 @@ - "'tox' not in uninstall_tox_again.application" ############################################################################## -- name: ensure application pylint is uninstalled - community.general.pipx: - name: pylint - state: absent -- name: install application pylint - community.general.pipx: - name: pylint - register: install_pylint +- name: Include testcase for inject packages + ansible.builtin.include_tasks: testcase-injectpkg.yml -- name: inject packages - community.general.pipx: - state: inject - name: pylint - inject_packages: - - licenses - register: inject_pkgs_pylint +- name: Include testcase for jupyter + ansible.builtin.include_tasks: testcase-jupyter.yml -- name: inject packages with apps - community.general.pipx: - state: inject - name: pylint - inject_packages: - - black - install_apps: true - register: inject_pkgs_apps_pylint +- name: Include testcase for old site-wide + ansible.builtin.include_tasks: testcase-oldsitewide.yml -- name: cleanup pylint - community.general.pipx: - state: absent - name: pylint - register: uninstall_pylint +- name: Include testcase for issue 7497 + ansible.builtin.include_tasks: testcase-7497.yml -- name: check assertions inject_packages - assert: - that: - - install_pylint is changed - - inject_pkgs_pylint is changed - - '"pylint" in inject_pkgs_pylint.application' - - '"licenses" in inject_pkgs_pylint.application["pylint"]["injected"]' - - inject_pkgs_apps_pylint is changed - - '"pylint" in inject_pkgs_apps_pylint.application' - - '"black" in inject_pkgs_apps_pylint.application["pylint"]["injected"]' - - uninstall_pylint is changed +- name: Include testcase for issue 8656 + ansible.builtin.include_tasks: testcase-8656.yml 
-############################################################################## -- name: install jupyter - not working smoothly in freebsd - # when: ansible_system != 'FreeBSD' +- name: install pipx + pip: + name: pipx>=1.7.0 + extra_args: --user + ignore_errors: true + register: pipx170_install + +- name: Recent features + when: + - pipx170_install is not failed + - pipx170_install is changed block: - - name: ensure application mkdocs is uninstalled - community.general.pipx: - name: mkdocs - state: absent - - - name: install application mkdocs - community.general.pipx: - name: mkdocs - install_deps: true - register: install_mkdocs - - - name: cleanup mkdocs - community.general.pipx: - state: absent - name: mkdocs - - - name: check assertions - assert: - that: - - install_mkdocs is changed - - '"markdown_py" in install_mkdocs.stdout' - -############################################################################## -- name: ensure /opt/pipx - ansible.builtin.file: - path: /opt/pipx - state: directory - mode: 0755 - -- name: install tox site-wide - community.general.pipx: - name: tox - state: latest - register: install_tox_sitewide - environment: - PIPX_HOME: /opt/pipx - PIPX_BIN_DIR: /usr/local/bin - -- name: stat /usr/local/bin/tox - ansible.builtin.stat: - path: /usr/local/bin/tox - register: usrlocaltox - -- name: check assertions - ansible.builtin.assert: - that: - - install_tox_sitewide is changed - - usrlocaltox.stat.exists - -############################################################################## -# Test for issue 7497 -- name: ensure application pyinstaller is uninstalled - community.general.pipx: - name: pyinstaller - state: absent - -- name: Install Python Package pyinstaller - community.general.pipx: - name: pyinstaller - state: present - system_site_packages: true - pip_args: "--no-cache-dir" - register: install_pyinstaller - -- name: cleanup pyinstaller - community.general.pipx: - name: pyinstaller - state: absent - -- name: check assertions - 
assert: - that: - - install_pyinstaller is changed - -############################################################################## -# Test for issue 8656 -- name: ensure application conan2 is uninstalled - community.general.pipx: - name: conan2 - state: absent - -- name: Install Python Package conan with suffix 2 (conan2) - community.general.pipx: - name: conan - state: install - suffix: "2" - register: install_conan2 - -- name: Install Python Package conan with suffix 2 (conan2) again - community.general.pipx: - name: conan - state: install - suffix: "2" - register: install_conan2_again - -- name: cleanup conan2 - community.general.pipx: - name: conan2 - state: absent - -- name: check assertions - assert: - that: - - install_conan2 is changed - - "' - conan2' in install_conan2.stdout" - - install_conan2_again is not changed + - name: Include testcase for PR 8793 --global + ansible.builtin.include_tasks: testcase-8793-global.yml diff --git a/tests/integration/targets/pipx/tasks/testcase-7497.yml b/tests/integration/targets/pipx/tasks/testcase-7497.yml new file mode 100644 index 0000000000..938196ef59 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-7497.yml @@ -0,0 +1,27 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: ensure application pyinstaller is uninstalled + community.general.pipx: + name: pyinstaller + state: absent + +- name: Install Python Package pyinstaller + community.general.pipx: + name: pyinstaller + state: present + system_site_packages: true + pip_args: "--no-cache-dir" + register: install_pyinstaller + +- name: cleanup pyinstaller + community.general.pipx: + name: pyinstaller + state: absent + +- name: check assertions + assert: + that: + - install_pyinstaller is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8656.yml 
b/tests/integration/targets/pipx/tasks/testcase-8656.yml new file mode 100644 index 0000000000..10e99e846e --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8656.yml @@ -0,0 +1,35 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: ensure application conan2 is uninstalled + community.general.pipx: + name: conan2 + state: absent + +- name: Install Python Package conan with suffix 2 (conan2) + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2 + +- name: Install Python Package conan with suffix 2 (conan2) again + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2_again + +- name: cleanup conan2 + community.general.pipx: + name: conan2 + state: absent + +- name: check assertions + assert: + that: + - install_conan2 is changed + - "' - conan2' in install_conan2.stdout" + - install_conan2_again is not changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml new file mode 100644 index 0000000000..7d3c871306 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml @@ -0,0 +1,58 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Remove global pipx dir + ansible.builtin.file: + path: /opt/pipx + state: absent + force: true + + - name: Create global pipx dir + ansible.builtin.file: + path: /opt/pipx + state: directory + mode: '0755' + + - name: Uninstall pycowsay + community.general.pipx: + state: uninstall + name: pycowsay + + - name: 
Uninstall pycowsay (global) + community.general.pipx: + state: uninstall + name: pycowsay + global: true + + - name: Run pycowsay (should fail) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + ignore_errors: true + + - name: Install pycowsay (global) + community.general.pipx: + state: install + name: pycowsay + global: true + + - name: Run pycowsay (should succeed) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + register: what_the_cow_said + + - name: Which cow? + ansible.builtin.command: which pycowsay + changed_when: false + register: which_cow + + - name: Assert Moooooooo + ansible.builtin.assert: + that: + - "'Moooooooo!' in what_the_cow_said.stdout" + - "'/usr/local/bin/pycowsay' in which_cow.stdout" diff --git a/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml new file mode 100644 index 0000000000..60296024e4 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml @@ -0,0 +1,49 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: ensure application pylint is uninstalled + community.general.pipx: + name: pylint + state: absent + +- name: install application pylint + community.general.pipx: + name: pylint + register: install_pylint + +- name: inject packages + community.general.pipx: + state: inject + name: pylint + inject_packages: + - licenses + register: inject_pkgs_pylint + +- name: inject packages with apps + community.general.pipx: + state: inject + name: pylint + inject_packages: + - black + install_apps: true + register: inject_pkgs_apps_pylint + +- name: cleanup pylint + community.general.pipx: + state: absent + name: pylint + register: uninstall_pylint + +- name: check assertions inject_packages + assert: + that: + - install_pylint is changed + - 
inject_pkgs_pylint is changed + - '"pylint" in inject_pkgs_pylint.application' + - '"licenses" in inject_pkgs_pylint.application["pylint"]["injected"]' + - inject_pkgs_apps_pylint is changed + - '"pylint" in inject_pkgs_apps_pylint.application' + - '"black" in inject_pkgs_apps_pylint.application["pylint"]["injected"]' + - uninstall_pylint is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-jupyter.yml b/tests/integration/targets/pipx/tasks/testcase-jupyter.yml new file mode 100644 index 0000000000..e4b5d48dd5 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-jupyter.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: install jupyter + block: + - name: ensure application mkdocs is uninstalled + community.general.pipx: + name: mkdocs + state: absent + + - name: install application mkdocs + community.general.pipx: + name: mkdocs + install_deps: true + register: install_mkdocs + + - name: cleanup mkdocs + community.general.pipx: + state: absent + name: mkdocs + + - name: check assertions + assert: + that: + - install_mkdocs is changed + - '"markdown_py" in install_mkdocs.stdout' diff --git a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml new file mode 100644 index 0000000000..1db3e60406 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Ensure /opt/pipx + ansible.builtin.file: + path: /opt/pipx + state: directory + mode: 0755 + +- name: Install tox site-wide + community.general.pipx: + name: tox + state: latest + register: 
install_tox_sitewide + environment: + PIPX_HOME: /opt/pipx + PIPX_BIN_DIR: /usr/local/bin + +- name: stat /usr/local/bin/tox + ansible.builtin.stat: + path: /usr/local/bin/tox + register: usrlocaltox + +- name: Uninstall tox site-wide + community.general.pipx: + name: tox + state: uninstall + register: uninstall_tox_sitewide + environment: + PIPX_HOME: /opt/pipx + PIPX_BIN_DIR: /usr/local/bin + +- name: check assertions + ansible.builtin.assert: + that: + - install_tox_sitewide is changed + - usrlocaltox.stat.exists + - uninstall_tox_sitewide is changed From e9071e9871188fc8d5d7d8ac541b1fdeda010426 Mon Sep 17 00:00:00 2001 From: Veikko Virrankoski <71337077+vvirrank@users.noreply.github.com> Date: Mon, 26 Aug 2024 11:21:52 +0300 Subject: [PATCH 213/482] Fix gitlab access token crash in check mode for new tokens (#8796) Fix crash in check mode when attempting to create a new gitlab access token --- changelogs/fragments/8796-gitlab-access-token-check-mode.yml | 3 +++ plugins/modules/gitlab_group_access_token.py | 5 ++++- plugins/modules/gitlab_project_access_token.py | 5 ++++- 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8796-gitlab-access-token-check-mode.yml diff --git a/changelogs/fragments/8796-gitlab-access-token-check-mode.yml b/changelogs/fragments/8796-gitlab-access-token-check-mode.yml new file mode 100644 index 0000000000..6585584fac --- /dev/null +++ b/changelogs/fragments/8796-gitlab-access-token-check-mode.yml @@ -0,0 +1,3 @@ +bugfixes: + - gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). + - gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). 
diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py index 85bba205db..1db7414081 100644 --- a/plugins/modules/gitlab_group_access_token.py +++ b/plugins/modules/gitlab_group_access_token.py @@ -313,7 +313,10 @@ def main(): module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) else: gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) if __name__ == '__main__': diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py index e692a30577..9bfbc51cc7 100644 --- a/plugins/modules/gitlab_project_access_token.py +++ b/plugins/modules/gitlab_project_access_token.py @@ -311,7 +311,10 @@ def main(): module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) else: gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) if __name__ == '__main__': From 
96d5e6e50e972ae1bfc61cf1fb71c0738230c213 Mon Sep 17 00:00:00 2001 From: Shubham Singh Sugara <37795429+shubhamsugara22@users.noreply.github.com> Date: Mon, 26 Aug 2024 23:39:19 +0530 Subject: [PATCH 214/482] copr: add includepkgs functionality (#8779) * Limit package for Copr using includepkgs * Limit package for Copr using includepkgs * Limit package for Copr using includepkgs * Limit package for Copr using includepkgs * Limit package for Copr using includepkgs * Added changes in copr module * Excludepkgs parameter add * Update module and params to handle a list + Docs updated * Update module and params to handle a list + Docs updated --- .../8738-limit-packages-for-copr.yml | 2 ++ plugins/modules/copr.py | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 changelogs/fragments/8738-limit-packages-for-copr.yml diff --git a/changelogs/fragments/8738-limit-packages-for-copr.yml b/changelogs/fragments/8738-limit-packages-for-copr.yml new file mode 100644 index 0000000000..0e49cc5cd9 --- /dev/null +++ b/changelogs/fragments/8738-limit-packages-for-copr.yml @@ -0,0 +1,2 @@ +minor_changes: + - copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository(https://github.com/ansible-collections/community.general/pull/8779). \ No newline at end of file diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py index 157a6c1605..809064114a 100644 --- a/plugins/modules/copr.py +++ b/plugins/modules/copr.py @@ -52,6 +52,18 @@ options: for example V(epel-7-x86_64). Default chroot is determined by the operating system, version of the operating system, and architecture on which the module is run. type: str + includepkgs: + description: List of packages to include. + required: false + type: list + elements: str + version_added: 9.4.0 + excludepkgs: + description: List of packages to exclude. 
+ required: false + type: list + elements: str + version_added: 9.4.0 """ EXAMPLES = r""" @@ -255,6 +267,12 @@ class CoprModule(object): """ if not repo_content: repo_content = self._download_repo_info() + if self.ansible_module.params["includepkgs"]: + includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) + repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value) + if self.ansible_module.params["excludepkgs"]: + excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) + repo_content = repo_content.rstrip('\n') + '\nexcludepkgs={0}\n'.format(excludepkgs_value) if self._compare_repo_content(repo_filename_path, repo_content): return False if not self.check_mode: @@ -470,6 +488,8 @@ def run_module(): name=dict(type="str", required=True), state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), chroot=dict(type="str"), + includepkgs=dict(type='list', elements="str", required=False), + excludepkgs=dict(type='list', elements="str", required=False), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) params = module.params From 249126f42972edd1ec427c777588b9d5ccf4bfbb Mon Sep 17 00:00:00 2001 From: Simon Siebert Date: Sun, 1 Sep 2024 20:22:38 +0200 Subject: [PATCH 215/482] proxmox inventory: fixing possible concatenation error (#8794) * Fixing possible concatination error * Create 8794-Fixing-possible-concatination-error.yaml * Update 8794-Fixing-possible-concatination-error.yaml Updating changelog --- .../fragments/8794-Fixing-possible-concatination-error.yaml | 2 ++ plugins/inventory/proxmox.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8794-Fixing-possible-concatination-error.yaml diff --git a/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml b/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml new file mode 100644 index 0000000000..a94eace415 --- /dev/null +++ 
b/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index a4b05b57ed..edfadfd8ad 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -329,8 +329,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): data = json['data'] break else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] + if json['data']: + # /hosts 's 'results' is a list of all hosts, returned is paginated + data = data + json['data'] break self._cache[self.cache_key][url] = data From 593d302f0b92228be71b476785dfadb30a0159d6 Mon Sep 17 00:00:00 2001 From: Stefan Birkner Date: Sun, 1 Sep 2024 20:22:46 +0200 Subject: [PATCH 216/482] Fix and enable test for datadog_downtime (#8815) The test has been disabled because it started to fail after an update of the Datadog API client. The issue itself (id cannot be set in constructor) and other issues (module name, additional attribute in API responses) are now fixed. The test is now working fine again. Fixes #3219. 
--- ...e.py.disabled => test_datadog_downtime.py} | 50 ++++++++++++------- 1 file changed, 33 insertions(+), 17 deletions(-) rename tests/unit/plugins/modules/{test_datadog_downtime.py.disabled => test_datadog_downtime.py} (86%) diff --git a/tests/unit/plugins/modules/test_datadog_downtime.py.disabled b/tests/unit/plugins/modules/test_datadog_downtime.py similarity index 86% rename from tests/unit/plugins/modules/test_datadog_downtime.py.disabled rename to tests/unit/plugins/modules/test_datadog_downtime.py index 52f27710cf..e1ecbfa66f 100644 --- a/tests/unit/plugins/modules/test_datadog_downtime.py.disabled +++ b/tests/unit/plugins/modules/test_datadog_downtime.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.plugins.modules.monitoring.datadog import datadog_downtime +from ansible_collections.community.general.plugins.modules import datadog_downtime from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args @@ -36,7 +36,7 @@ class TestDatadogDowntime(ModuleTestCase): set_module_args({}) self.module.main() - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_no_id(self, downtimes_api_mock): set_module_args({ "monitor_tags": ["foo:bar"], @@ -60,10 +60,11 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) - create_downtime_mock = MagicMock(return_value=Downtime(id=12345)) + create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) 
downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock) with self.assertRaises(AnsibleExitJson) as result: self.module.main() @@ -71,7 +72,7 @@ class TestDatadogDowntime(ModuleTestCase): self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) create_downtime_mock.assert_called_once_with(downtime) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -96,11 +97,16 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) - create_downtime_mock = MagicMock(return_value=Downtime(id=12345)) - get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=True)) + disabled_downtime = Downtime() + disabled_downtime.disabled = True + disabled_downtime.id = 1212 + + create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) + get_downtime_mock = MagicMock(return_value=disabled_downtime) downtimes_api_mock.return_value = MagicMock( create_downtime=create_downtime_mock, get_downtime=get_downtime_mock ) @@ -111,7 +117,7 @@ class TestDatadogDowntime(ModuleTestCase): create_downtime_mock.assert_called_once_with(downtime) get_downtime_mock.assert_called_once_with(1212) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_when_not_disabled(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -136,11 +142,16 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - 
rrule="rrule" + rrule="rrule", + type="rrule" ) - update_downtime_mock = MagicMock(return_value=Downtime(id=1212)) - get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=False)) + enabled_downtime = Downtime() + enabled_downtime.disabled = False + enabled_downtime.id = 1212 + + update_downtime_mock = MagicMock(return_value=self.__downtime_with_id(1212)) + get_downtime_mock = MagicMock(return_value=enabled_downtime) downtimes_api_mock.return_value = MagicMock( update_downtime=update_downtime_mock, get_downtime=get_downtime_mock ) @@ -151,7 +162,7 @@ class TestDatadogDowntime(ModuleTestCase): update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_no_change(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -176,7 +187,8 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) downtime_get = Downtime() @@ -205,7 +217,7 @@ class TestDatadogDowntime(ModuleTestCase): update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_delete_downtime(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -215,12 +227,16 @@ class TestDatadogDowntime(ModuleTestCase): }) cancel_downtime_mock = MagicMock() - get_downtime_mock = MagicMock(return_value=Downtime(id=1212)) downtimes_api_mock.return_value = MagicMock( - get_downtime=get_downtime_mock, + 
get_downtime=self.__downtime_with_id, cancel_downtime=cancel_downtime_mock ) with self.assertRaises(AnsibleExitJson) as result: self.module.main() self.assertTrue(result.exception.args[0]['changed']) cancel_downtime_mock.assert_called_once_with(1212) + + def __downtime_with_id(self, id): + downtime = Downtime() + downtime.id = id + return downtime From ecc048bc12b5235247aecbc7c113029a4d3a517c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 2 Sep 2024 06:22:53 +1200 Subject: [PATCH 217/482] Use dict comprehension in plugins (#8814) * use dict comprehension in plugins * Apply suggestions from code review * add changelog frag * fix references in changelog frag --- .../fragments/8814-dict-comprehension.yml | 23 ++++++++++++++++ plugins/filter/hashids.py | 2 +- plugins/filter/keep_keys.py | 2 +- plugins/filter/remove_keys.py | 2 +- plugins/filter/replace_keys.py | 2 +- plugins/module_utils/csv.py | 2 +- plugins/module_utils/mh/mixins/vars.py | 4 +-- plugins/module_utils/vardict.py | 10 +++---- plugins/modules/apache2_mod_proxy.py | 2 +- plugins/modules/gitlab_group.py | 2 +- plugins/modules/keycloak_client.py | 2 +- plugins/modules/keycloak_clientscope.py | 2 +- plugins/modules/keycloak_identity_provider.py | 2 +- plugins/modules/keycloak_user_federation.py | 8 +++--- plugins/modules/linode.py | 2 +- plugins/modules/lxd_container.py | 26 +++++++++++++------ plugins/modules/manageiq_provider.py | 2 +- plugins/modules/one_service.py | 2 +- plugins/modules/one_vm.py | 4 +-- plugins/modules/proxmox.py | 4 +-- plugins/modules/proxmox_disk.py | 11 +++++--- plugins/modules/proxmox_kvm.py | 6 ++--- plugins/plugin_utils/unsafe.py | 2 +- tests/sanity/extra/botmeta.py | 2 +- 24 files changed, 81 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/8814-dict-comprehension.yml diff --git a/changelogs/fragments/8814-dict-comprehension.yml b/changelogs/fragments/8814-dict-comprehension.yml new file mode 100644 index 
0000000000..01b5da4bae --- /dev/null +++ b/changelogs/fragments/8814-dict-comprehension.yml @@ -0,0 +1,23 @@ +minor_changes: + - hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
+ - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py index 45fba83c03..ac771e6219 100644 --- a/plugins/filter/hashids.py +++ b/plugins/filter/hashids.py @@ -27,7 +27,7 @@ def initialize_hashids(**kwargs): if not HAS_HASHIDS: raise AnsibleError("The hashids library must be installed in order to use this plugin") - params = dict((k, v) for k, v in kwargs.items() if v) + params = {k: v for k, v in kwargs.items() if v} try: return Hashids(**params) diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py index dffccba356..97b706a950 100644 --- a/plugins/filter/keep_keys.py +++ b/plugins/filter/keep_keys.py @@ -127,7 +127,7 @@ def keep_keys(data, target=None, matching_parameter='equal'): def keep_key(key): return tt.match(key) is not None - return [dict((k, v) for k, v in d.items() if keep_key(k)) for d in data] + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] class FilterModule(object): diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py index cabce14682..7a4d912d34 100644 --- a/plugins/filter/remove_keys.py +++ b/plugins/filter/remove_keys.py @@ -127,7 +127,7 @@ def remove_keys(data, target=None, matching_parameter='equal'): def keep_key(key): return tt.match(key) is None - return [dict((k, v) for k, v in d.items() if keep_key(k)) for d in data] + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] class FilterModule(object): diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py index d3b12c05d0..70b264eba6 100644 --- a/plugins/filter/replace_keys.py +++ b/plugins/filter/replace_keys.py @@ -169,7 +169,7 @@ def replace_keys(data, target=None, matching_parameter='equal'): return a return key - return [dict((replace_key(k), v) for k, v in d.items()) for d in data] + return [{replace_key(k): v for k, v in d.items()} for d in data] class FilterModule(object): diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 200548a46d..46408e4877 100644 
--- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -43,7 +43,7 @@ def initialize_dialect(dialect, **kwargs): raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect) # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: csv.register_dialect('custom', dialect, **dialect_params) diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py index 1615609735..7db9904f93 100644 --- a/plugins/module_utils/mh/mixins/vars.py +++ b/plugins/module_utils/mh/mixins/vars.py @@ -113,7 +113,7 @@ class VarDict(object): self._meta[name] = meta def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) + return {k: v for k, v in self._data.items() if self.meta(k).output} def diff(self): diff_results = [(k, self.meta(k).diff_result) for k in self._data] @@ -125,7 +125,7 @@ class VarDict(object): return None def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) + facts_result = {k: v for k, v in self._data.items() if self._meta[k].fact} return facts_result if facts_result else None def change_vars(self): diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py index 51f802483d..9bd104ce37 100644 --- a/plugins/module_utils/vardict.py +++ b/plugins/module_utils/vardict.py @@ -175,18 +175,18 @@ class VarDict(object): self.__vars__[name] = var def output(self, verbosity=0): - return dict((n, v.value) for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)) + return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} def diff(self, verbosity=0): diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] if diff_results: - before = 
dict((n, dr['before']) for n, dr in diff_results) - after = dict((n, dr['after']) for n, dr in diff_results) + before = {n: dr['before'] for n, dr in diff_results} + after = {n: dr['after'] for n, dr in diff_results} return {'before': before, 'after': after} return None def facts(self, verbosity=0): - facts_result = dict((n, v.value) for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)) + facts_result = {n: v.value for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)} return facts_result if facts_result else None @property @@ -194,4 +194,4 @@ class VarDict(object): return any(var.has_changed for var in self.__vars__.values()) def as_dict(self): - return dict((name, var.value) for name, var in self.__vars__.items()) + return {name: var.value for name, var in self.__vars__.items()} diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py index 8f561e8ae0..87e99bdd9a 100644 --- a/plugins/modules/apache2_mod_proxy.py +++ b/plugins/modules/apache2_mod_proxy.py @@ -286,7 +286,7 @@ class BalancerMember(object): 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} actual_status = str(self.attributes['Status']) - status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping)) + status = {mode: patt in actual_status for mode, patt in iteritems(status_mapping)} return status def set_member_status(self, values): diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 3d57b18528..1f4dadff70 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -261,7 +261,7 @@ class GitLabGroup(object): try: # Filter out None values - filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None) + filtered = {arg_key: arg_value for arg_key, arg_value in arguments.items() if arg_value is not None} group = self._gitlab.groups.create(filtered) except (gitlab.exceptions.GitlabCreateError) as e: diff --git 
a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index efaa66e26d..d7e4fb0b7e 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -1006,7 +1006,7 @@ def main(): # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if client_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] elif client_param == 'authentication_flow_binding_overrides': new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc) diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index b8ee842195..576a831bdb 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -428,7 +428,7 @@ def main(): # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if clientscope_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] changeset[camel(clientscope_param)] = new_param_value # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index bb958d9e94..609673653b 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -534,7 +534,7 @@ def main(): # special handling of mappers list to allow change detection if module.params.get('mappers') is not None: for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is 
not None) + change = {k: v for k, v in change.items() if v is not None} if change.get('id') is None and change.get('name') is None: module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') if before_idp == dict(): diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 22cd36a64f..05f884cc1e 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -724,7 +724,7 @@ from copy import deepcopy def sanitize(comp): compcopy = deepcopy(comp) if 'config' in compcopy: - compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) + compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} if 'bindCredential' in compcopy['config']: compcopy['config']['bindCredential'] = '**********' # an empty string is valid for krbPrincipalAttribute but is filtered out in diff @@ -733,7 +733,7 @@ def sanitize(comp): if 'mappers' in compcopy: for mapper in compcopy['mappers']: if 'config' in mapper: - mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) + mapper['config'] = {k: v[0] for k, v in mapper['config'].items()} return compcopy @@ -886,7 +886,7 @@ def main(): new_param_value = module.params.get(param) old_value = before_comp[camel(param)] if camel(param) in before_comp else None if param == 'mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] if new_param_value != old_value: changeset[camel(param)] = new_param_value @@ -895,7 +895,7 @@ def main(): if module.params['provider_id'] in ['kerberos', 'sssd']: module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is not None) + change = {k: v for k, v in 
change.items() if v is not None} if change.get('id') is None and change.get('name') is None: module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') if cid is None: diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py index 9e04ac63da..9b0dabdff2 100644 --- a/plugins/modules/linode.py +++ b/plugins/modules/linode.py @@ -670,7 +670,7 @@ def main(): backupwindow=backupwindow, ) - kwargs = dict((k, v) for k, v in check_items.items() if v is not None) + kwargs = {k: v for k, v in check_items.items() if v is not None} # setup the auth try: diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index f44523a751..88e502e7c8 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -616,8 +616,15 @@ class LXDContainerManagement(object): def _instance_ipv4_addresses(self, ignore_devices=None): ignore_devices = ['lo'] if ignore_devices is None else ignore_devices data = (self._get_instance_state_json() or {}).get('metadata', None) or {} - network = dict((k, v) for k, v in (data.get('network', None) or {}).items() if k not in ignore_devices) - addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) + network = { + k: v + for k, v in data.get('network', {}).items() + if k not in ignore_devices + } + addresses = { + k: [a['address'] for a in v['addresses'] if a['family'] == 'inet'] + for k, v in network.items() + } return addresses @staticmethod @@ -748,19 +755,22 @@ class LXDContainerManagement(object): def run(self): """Run the main method.""" + def adjust_content(content): + return content if not isinstance(content, dict) else { + k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith('volatile.')) + } + try: if self.trust_password is not None: self.client.authenticate(self.trust_password) self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') self.old_instance_json 
= self._get_instance_json() - self.old_sections = dict( - (section, content) if not isinstance(content, dict) - else (section, dict((k, v) for k, v in content.items() - if not (self.ignore_volatile_options and k.startswith('volatile.')))) - for section, content in (self.old_instance_json.get('metadata', None) or {}).items() + self.old_sections = { + section: adjust_content(content) + for section, content in self.old_instance_json.get('metadata', {}).items() if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) - ) + } self.diff['before']['instance'] = self.old_sections # preliminary, will be overwritten in _apply_instance_configs() if called diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index e6ded9ea7a..af5c147f46 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -715,7 +715,7 @@ def delete_nulls(h): if isinstance(h, list): return [delete_nulls(i) for i in h] if isinstance(h, dict): - return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) + return {k: delete_nulls(v) for k, v in h.items() if v is not None} return h diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py index 81b42c0ecc..2c89e9b8ad 100644 --- a/plugins/modules/one_service.py +++ b/plugins/modules/one_service.py @@ -339,7 +339,7 @@ def get_service_info(module, auth, service): def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): # make sure that the values in custom_attrs dict are strings - custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) + custom_attrs_with_str = {k: str(v) for k, v in custom_attrs.items()} data = { "action": { diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py index 8ee9c85609..2f4ee25354 100644 --- a/plugins/modules/one_vm.py +++ b/plugins/modules/one_vm.py @@ -1559,11 +1559,11 @@ def main(): one_client = pyone.OneServer(auth.url, session=auth.username + 
':' + auth.password) if attributes: - attributes = dict((key.upper(), value) for key, value in attributes.items()) + attributes = {key.upper(): value for key, value in attributes.items()} check_attributes(module, attributes) if count_attributes: - count_attributes = dict((key.upper(), value) for key, value in count_attributes.items()) + count_attributes = {key.upper(): value for key, value in count_attributes.items()} if not attributes: import copy module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 20e20e9a88..52d5a849f3 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -771,7 +771,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): ) # Remove all empty kwarg entries - kwargs = dict((key, val) for key, val in kwargs.items() if val is not None) + kwargs = {key: val for key, val in kwargs.items() if val is not None} if cpus is not None: kwargs["cpulimit"] = cpus @@ -842,7 +842,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Remove all empty kwarg entries - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} pve_version = self.version() diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py index 979e551336..a4a9dd8791 100644 --- a/plugins/modules/proxmox_disk.py +++ b/plugins/modules/proxmox_disk.py @@ -524,8 +524,11 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): # - Remove not defined args # - Ensure True and False converted to int. 
# - Remove unnecessary parameters - params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields) - params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool))) + params = { + k: int(v) if isinstance(v, bool) else v + for k, v in self.module.params.items() + if v is not None and k in self.create_update_fields + } return params def wait_till_complete_or_timeout(self, node_name, task_id): @@ -598,7 +601,7 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): if iso_image is not None: playbook_config['volume'] = iso_image # Values in params are numbers, but strings are needed to compare with disk_config - playbook_config = dict((k, str(v)) for k, v in playbook_config.items()) + playbook_config = {k: str(v) for k, v in playbook_config.items()} # Now compare old and new config to detect if changes are needed if proxmox_config == playbook_config: @@ -626,7 +629,7 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): params['format'] = self.module.params['format'] params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0 # Remove not defined args - params = dict((k, v) for k, v in params.items() if v is not None) + params = {k: v for k, v in params.items() if v is not None} if params.get('storage', False): disk_config = disk_conf_str_to_dict(vm_config[disk]) diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 71cbb51fc1..e2b708170e 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -970,7 +970,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} # Convert all dict in kwargs to elements. 
# For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] @@ -996,7 +996,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} return proxmox_node.qemu(vmid).config.set(**kwargs) is None @@ -1031,7 +1031,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) version = self.version() diff --git a/plugins/plugin_utils/unsafe.py b/plugins/plugin_utils/unsafe.py index 1eb61bea0f..4fdb8b3d51 100644 --- a/plugins/plugin_utils/unsafe.py +++ b/plugins/plugin_utils/unsafe.py @@ -24,7 +24,7 @@ def make_unsafe(value): return value if isinstance(value, Mapping): - return dict((make_unsafe(key), make_unsafe(val)) for key, val in value.items()) + return {make_unsafe(key): make_unsafe(val) for key, val in value.items()} elif isinstance(value, Set): return set(make_unsafe(elt) for elt in value) elif is_sequence(value): diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index 459d3ba14d..d7828ebabb 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -190,7 +190,7 @@ def main(): try: for file, filedata in (botmeta.get('files') or {}).items(): file = convert_macros(file, macros) - filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items()) + filedata = {k: convert_macros(v, macros) for k, v in filedata.items()} files[file] = filedata for k, v in filedata.items(): if k in LIST_ENTRIES: From 
7e978c77b4a1d3f317f65a544ec5f3051161fba0 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 6 Sep 2024 07:47:28 +1200 Subject: [PATCH 218/482] use dict comprehension in plugins, part 2 (#8822) * use dict comprehension in plugins * add changelog frag --- .../fragments/8822-dict-comprehension.yml | 21 +++++++++++++++++++ plugins/lookup/credstash.py | 6 +++--- .../identity/keycloak/keycloak.py | 3 +-- plugins/module_utils/mh/deco.py | 8 +++---- plugins/module_utils/redfish_utils.py | 18 +++++++++------- plugins/module_utils/scaleway.py | 11 ++++------ plugins/modules/etcd3.py | 9 ++------ plugins/modules/gitlab_project.py | 10 +++------ plugins/modules/hwc_ecs_instance.py | 3 +-- plugins/modules/hwc_evs_disk.py | 3 +-- plugins/modules/hwc_vpc_eip.py | 3 +-- plugins/modules/hwc_vpc_peering_connect.py | 3 +-- plugins/modules/hwc_vpc_port.py | 3 +-- plugins/modules/hwc_vpc_subnet.py | 6 ++---- plugins/modules/ipa_otptoken.py | 4 +--- plugins/modules/keycloak_user_federation.py | 7 +++++-- plugins/modules/lxc_container.py | 6 +++--- plugins/modules/proxmox_kvm.py | 4 ++-- plugins/modules/scaleway_security_group.py | 6 +++--- plugins/modules/ufw.py | 2 +- plugins/modules/vmadm.py | 8 ++++--- 21 files changed, 76 insertions(+), 68 deletions(-) create mode 100644 changelogs/fragments/8822-dict-comprehension.yml diff --git a/changelogs/fragments/8822-dict-comprehension.yml b/changelogs/fragments/8822-dict-comprehension.yml new file mode 100644 index 0000000000..cefb673bb8 --- /dev/null +++ b/changelogs/fragments/8822-dict-comprehension.yml @@ -0,0 +1,21 @@ +minor_changes: + - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). 
+ - deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). 
+ - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 6a3f58595b..fd284f55c8 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -120,10 +120,10 @@ class LookupModule(LookupBase): aws_secret_access_key = self.get_option('aws_secret_access_key') aws_session_token = self.get_option('aws_session_token') - context = dict( - (k, v) for k, v in kwargs.items() + context = { + k: v for k, v in kwargs.items() if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') - ) + } kwargs_pass = { 'profile_name': profile_name, diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 020b185a30..128b0fee13 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -185,8 +185,7 @@ def get_token(module_params): 'password': auth_password, } # Remove empty items, for instance missing client_secret - payload = dict( - (k, v) for k, v in temp_payload.items() if v is not None) + payload = {k: v for k, v in temp_payload.items() if v is not None} try: r = json.loads(to_native(open_url(auth_url, method='POST', validate_certs=validate_certs, 
http_agent=http_agent, timeout=connection_timeout, diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index c7b63b7050..408891cb8e 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -45,11 +45,11 @@ def module_fails_on_exception(func): @wraps(func) def wrapper(self, *args, **kwargs): + def fix_key(k): + return k if k not in conflict_list else "_" + k + def fix_var_conflicts(output): - result = dict([ - (k if k not in conflict_list else "_" + k, v) - for k, v in output.items() - ]) + result = {fix_key(k): v for k, v in output.items()} return result try: diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index edfc612466..c79506075a 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -613,9 +613,11 @@ class RedfishUtils(object): ai = dict((p['Name'], p) for p in params if 'Name' in p) if not ai: - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in action.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in action.items() + if k.endswith('@Redfish.AllowableValues') + } return ai def _get_allowable_values(self, action, name, default_values=None): @@ -2242,7 +2244,7 @@ class RedfishUtils(object): continue # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: + if data[u'Attributes'][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" @@ -2780,9 +2782,11 @@ class RedfishUtils(object): def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False): # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in data.items() + if k.endswith('@Redfish.AllowableValues') + } # construct payload payload = 
self._insert_virt_media_payload(options, param_map, data, ai) if 'Inserted' not in payload and not image_only: diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 1310ba5602..2b21ec3793 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -51,11 +51,11 @@ def scaleway_waitable_resource_argument_spec(): def payload_from_object(scw_object): - return dict( - (k, v) + return { + k: v for k, v in scw_object.items() if k != 'id' and v is not None - ) + } class ScalewayException(Exception): @@ -117,10 +117,7 @@ class SecretVariables(object): @staticmethod def list_to_dict(source_list, hashed=False): key_value = 'hashed_value' if hashed else 'value' - return dict( - (var['key'], var[key_value]) - for var in source_list - ) + return {var['key']: var[key_value] for var in source_list} @classmethod def decode(cls, secrets_list, values_list): diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py index 2fdc3f2f83..b1bb181cf4 100644 --- a/plugins/modules/etcd3.py +++ b/plugins/modules/etcd3.py @@ -193,13 +193,8 @@ def run_module(): allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value + + client_params = {key: value for key, value in module.params.items() if key in allowed_keys} try: etcd = etcd3.client(**client_params) except Exception as exp: diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index c5d2278ba0..a85f2bd827 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -534,11 +534,7 @@ class GitLabProject(object): @param arguments Attributes of the project 
''' def get_options_with_value(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - + ret_arguments = {k: v for k, v in arguments.items() if v is not None} return ret_arguments ''' @@ -549,8 +545,8 @@ class GitLabProject(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key, None) != arguments[arg_key]: + if arg_value is not None: + if getattr(project, arg_key, None) != arg_value: if arg_key == 'container_expiration_policy': old_val = getattr(project, arg_key, {}) final_val = {key: value for key, value in arg_value.items() if value is not None} diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py index 9ba95dc96d..cc6ef926dd 100644 --- a/plugins/modules/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -1163,8 +1163,7 @@ def send_delete_volume_request(module, params, client, info): path_parameters = { "volume_id": ["volume_id"], } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(info, path) for key, path in path_parameters.items()} url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py index 7d445ddd21..5f0e40b196 100644 --- a/plugins/modules/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -771,8 +771,7 @@ def async_wait(config, result, client, timeout): path_parameters = { "job_id": ["job_id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "jobs/{job_id}", data) diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py index 5c44319409..c3039ca2e5 100644 --- 
a/plugins/modules/hwc_vpc_eip.py +++ b/plugins/modules/hwc_vpc_eip.py @@ -547,8 +547,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "publicip_id": ["publicip", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "publicips/{publicip_id}", data) diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py index 2d6832ce5d..854d89e76a 100644 --- a/plugins/modules/hwc_vpc_peering_connect.py +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -407,8 +407,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "peering_id": ["peering", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py index 2d830493d4..08b1c0607d 100644 --- a/plugins/modules/hwc_vpc_port.py +++ b/plugins/modules/hwc_vpc_port.py @@ -560,8 +560,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "port_id": ["port", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "ports/{port_id}", data) diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py index 7ba7473301..ff6e425ca9 100644 --- a/plugins/modules/hwc_vpc_subnet.py +++ b/plugins/modules/hwc_vpc_subnet.py @@ -440,8 +440,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in 
path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) @@ -538,8 +537,7 @@ def async_wait_update(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py index 567674f935..d8a5b3cf1d 100644 --- a/plugins/modules/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -392,9 +392,7 @@ def ensure(module, client): 'counter': 'ipatokenhotpcounter'} # Create inverse dictionary for mapping return values - ipa_to_ansible = {} - for (k, v) in ansible_to_ipa.items(): - ipa_to_ansible[v] = k + ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()} unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', 'digits', 'offset', 'interval', 'counter'] diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 05f884cc1e..3f65af3a93 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -847,8 +847,11 @@ def main(): # Keycloak API expects config parameters to be arrays containing a single string element if config is not None: - module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in config.items() if config[k] is not None) + module.params['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in config.items() + if config[k] is not None + } if mappers is not None: for mapper in mappers: diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py index 7ded041e93..2d768eaafd 100644 --- a/plugins/modules/lxc_container.py +++ 
b/plugins/modules/lxc_container.py @@ -683,11 +683,11 @@ class LxcContainerManagement(object): variables.pop(v, None) false_values = BOOLEANS_FALSE.union([None, '']) - result = dict( - (v, self.module.params[k]) + result = { + v: self.module.params[k] for k, v in variables.items() if self.module.params[k] not in false_values - ) + } return result def _config(self): diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index e2b708170e..771ddd902f 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -1032,7 +1032,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. kwargs = {k: v for k, v in kwargs.items() if v is not None} - kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) + kwargs.update({k: int(v) for k, v in kwargs.items() if isinstance(v, bool)}) version = self.version() pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] @@ -1163,7 +1163,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): for param in valid_clone_params: if self.module.params[param] is not None: clone_params[param] = self.module.params[param] - clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool))) + clone_params.update({k: int(v) for k, v in clone_params.items() if isinstance(v, bool)}) taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) else: taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py index c09bc34bad..0c7f3f6ec6 100644 --- a/plugins/modules/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -135,11 +135,11 @@ from uuid import uuid4 def payload_from_security_group(security_group): - return dict( - (k, v) + return { + k: v for k, v in 
security_group.items() if k != 'id' and v is not None - ) + } def present_strategy(api, security_group): diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py index 5d187793bd..7a90647979 100644 --- a/plugins/modules/ufw.py +++ b/plugins/modules/ufw.py @@ -446,7 +446,7 @@ def main(): params = module.params - commands = dict((key, params[key]) for key in command_keys if params[key]) + commands = {key: params[key] for key in command_keys if params[key]} # Ensure ufw is available ufw_bin = module.get_bin_path('ufw', True) diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py index bfe6148375..923a902bcf 100644 --- a/plugins/modules/vmadm.py +++ b/plugins/modules/vmadm.py @@ -558,9 +558,11 @@ def create_payload(module, uuid): # Filter out the few options that are not valid VM properties. module_options = ['force', 'state'] - # @TODO make this a simple {} comprehension as soon as py2 is ditched - # @TODO {k: v for k, v in p.items() if k not in module_options} - vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v]) + vmdef = { + k: v + for k, v in module.params.items() + if k not in module_options and v + } try: vmdef_json = json.dumps(vmdef) From 43f8adf1a5c7184310bc6ef7deee6fd8ebb5d9c1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 7 Sep 2024 09:49:16 +1200 Subject: [PATCH 219/482] pipx: add new states (#8809) * ensure minimum version of pip * ensure pipx 1.7.0 is installed * pipx: add new states/params * add tests * add license to json file * Update plugins/modules/pipx.py Co-authored-by: Felix Fontein * fix uninject tests * add changelog frag * fix doc per review * refactor license out of pipx spec file * Update plugins/modules/pipx.py Co-authored-by: Felix Fontein * Update tests/integration/targets/pipx/files/spec.json.license Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8809-pipx-new-params.yml | 2 + 
plugins/module_utils/pipx.py | 5 + plugins/modules/pipx.py | 69 +++++++++++++- .../integration/targets/pipx/files/spec.json | 91 +++++++++++++++++++ .../targets/pipx/files/spec.json.license | 3 + tests/integration/targets/pipx/tasks/main.yml | 9 ++ .../pipx/tasks/testcase-8809-installall.yml | 59 ++++++++++++ .../targets/pipx/tasks/testcase-8809-pin.yml | 69 ++++++++++++++ .../pipx/tasks/testcase-8809-uninjectpkg.yml | 69 ++++++++++++++ .../targets/pipx/tasks/testcase-injectpkg.yml | 12 +-- 10 files changed, 377 insertions(+), 11 deletions(-) create mode 100644 changelogs/fragments/8809-pipx-new-params.yml create mode 100644 tests/integration/targets/pipx/files/spec.json create mode 100644 tests/integration/targets/pipx/files/spec.json.license create mode 100644 tests/integration/targets/pipx/tasks/testcase-8809-installall.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-8809-pin.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml diff --git a/changelogs/fragments/8809-pipx-new-params.yml b/changelogs/fragments/8809-pipx-new-params.yml new file mode 100644 index 0000000000..775163e987 --- /dev/null +++ b/changelogs/fragments/8809-pipx-new-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809). 
diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 054de886a4..9ae7b5381c 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -11,15 +11,20 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor _state_map = dict( install='install', + install_all='install-all', present='install', uninstall='uninstall', absent='uninstall', uninstall_all='uninstall-all', inject='inject', + uninject='uninject', upgrade='upgrade', + upgrade_shared='upgrade-shared', upgrade_all='upgrade-all', reinstall='reinstall', reinstall_all='reinstall-all', + pin='pin', + unpin='unpin', ) diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 1a73ae00bd..38efc56ffc 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -26,13 +26,31 @@ attributes: options: state: type: str - choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all, latest] + choices: + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin default: install description: - Desired state for the application. - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), + make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0. name: type: str description: @@ -128,6 +146,13 @@ options: type: bool default: false version_added: 9.4.0 + spec_metadata: + description: + - Spec metadata file for O(state=install_all). 
+ - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) + with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + type: path + version_added: 9.4.0 notes: - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). - Please note that C(pipx) requires Python 3.6 or above. @@ -201,8 +226,10 @@ class PipX(StateModuleHelper): output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] argument_spec = dict( state=dict(type='str', default='install', - choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', - 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']), + choices=[ + 'present', 'absent', 'install', 'install_all', 'uninstall', 'uninstall_all', 'inject', 'uninject', + 'upgrade', 'upgrade_shared', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest', 'pin', 'unpin', + ]), name=dict(type='str'), source=dict(type='str'), install_apps=dict(type='bool', default=False), @@ -217,6 +244,7 @@ class PipX(StateModuleHelper): editable=dict(type='bool', default=False), pip_args=dict(type='str'), suffix=dict(type='str'), + spec_metadata=dict(type='path'), ) argument_spec["global"] = dict(type='bool', default=False) @@ -225,12 +253,15 @@ class PipX(StateModuleHelper): required_if=[ ('state', 'present', ['name']), ('state', 'install', ['name']), + ('state', 'install_all', ['spec_metadata']), ('state', 'absent', ['name']), ('state', 'uninstall', ['name']), ('state', 'upgrade', ['name']), ('state', 'reinstall', ['name']), ('state', 'latest', ['name']), ('state', 'inject', ['name', 'inject_packages']), + ('state', 'pin', ['name']), + ('state', 'unpin', ['name']), ], required_by=dict( suffix="name", @@ -284,8 +315,7 @@ class PipX(StateModuleHelper): self.vars.stdout = ctx.results_out 
self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) def state_install(self): if not self.vars.application or self.vars.force: @@ -297,6 +327,12 @@ class PipX(StateModuleHelper): state_present = state_install + def state_install_all(self): + self.changed = True + with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: + ctx.run(name_source=[self.vars.name, self.vars.source]) + self._capture_results(ctx) + def state_upgrade(self): name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: @@ -336,6 +372,14 @@ class PipX(StateModuleHelper): ctx.run(name=name) self._capture_results(ctx) + def state_uninject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to uninject packages into a non-existent application: {0}".format(name)) + with self.runner('state global name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + def state_uninstall_all(self): with self.runner('state global', check_mode_skip=True) as ctx: ctx.run() @@ -353,6 +397,11 @@ class PipX(StateModuleHelper): ctx.run() self._capture_results(ctx) + def state_upgrade_shared(self): + with self.runner('state global pip_args', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True @@ -365,6 +414,16 @@ class PipX(StateModuleHelper): ctx.run(state='upgrade') self._capture_results(ctx) + def state_pin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_unpin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + def main(): 
PipX.execute() diff --git a/tests/integration/targets/pipx/files/spec.json b/tests/integration/targets/pipx/files/spec.json new file mode 100644 index 0000000000..3c85125337 --- /dev/null +++ b/tests/integration/targets/pipx/files/spec.json @@ -0,0 +1,91 @@ +{ + "pipx_spec_version": "0.1", + "venvs": { + "black": { + "metadata": { + "injected_packages": {}, + "main_package": { + "app_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/black/bin/black", + "__type__": "Path" + }, + { + "__Path__": "/home/az/.local/pipx/venvs/black/bin/blackd", + "__type__": "Path" + } + ], + "app_paths_of_dependencies": {}, + "apps": [ + "black", + "blackd" + ], + "apps_of_dependencies": [], + "include_apps": true, + "include_dependencies": false, + "man_pages": [], + "man_pages_of_dependencies": [], + "man_paths": [], + "man_paths_of_dependencies": {}, + "package": "black", + "package_or_url": "black", + "package_version": "24.8.0", + "pinned": false, + "pip_args": [], + "suffix": "" + }, + "pipx_metadata_version": "0.5", + "python_version": "Python 3.11.9", + "source_interpreter": { + "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", + "__type__": "Path" + }, + "venv_args": [] + } + }, + "pycowsay": { + "metadata": { + "injected_packages": {}, + "main_package": { + "app_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/pycowsay/bin/pycowsay", + "__type__": "Path" + } + ], + "app_paths_of_dependencies": {}, + "apps": [ + "pycowsay" + ], + "apps_of_dependencies": [], + "include_apps": true, + "include_dependencies": false, + "man_pages": [ + "man6/pycowsay.6" + ], + "man_pages_of_dependencies": [], + "man_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/pycowsay/share/man/man6/pycowsay.6", + "__type__": "Path" + } + ], + "man_paths_of_dependencies": {}, + "package": "pycowsay", + "package_or_url": "pycowsay", + "package_version": "0.0.0.2", + "pinned": false, + "pip_args": [], + "suffix": "" + }, + "pipx_metadata_version": "0.5", + "python_version": 
"Python 3.11.9", + "source_interpreter": { + "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", + "__type__": "Path" + }, + "venv_args": [] + } + }, + } +} diff --git a/tests/integration/targets/pipx/files/spec.json.license b/tests/integration/targets/pipx/files/spec.json.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/pipx/files/spec.json.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index f1a993aa56..30e96ef1bf 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -247,3 +247,12 @@ block: - name: Include testcase for PR 8793 --global ansible.builtin.include_tasks: testcase-8793-global.yml + + - name: Include testcase for PR 8809 install-all + ansible.builtin.include_tasks: testcase-8809-install-all.yml + + - name: Include testcase for PR 8809 pin + ansible.builtin.include_tasks: testcase-8809-pin.yml + + - name: Include testcase for PR 8809 injectpkg + ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml new file mode 100644 index 0000000000..37816247c0 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml @@ -0,0 +1,59 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + 
state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_1 + + - name: Use install-all + community.general.pipx: + state: install_all + spec_metadata: spec.json + register: install_all + + - name: Run pycowsay (should succeed) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + register: what_the_cow_said + + - name: Which cow? + ansible.builtin.command: which pycowsay + changed_when: false + register: which_cow + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert uninstall-all + ansible.builtin.assert: + that: + - uninstall_all_1 is not changed + - install_all is changed + - "'Moooooooo!' in what_the_cow_said.stdout" + - "'/usr/local/bin/pycowsay' in which_cow.stdout" + - uninstall_all_2 is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml new file mode 100644 index 0000000000..89e4bb9dc6 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml @@ -0,0 +1,69 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + state: uninstall + name: pycowsay + + # latest is 0.0.0.2 + - name: Install pycowsay 0.0.0.1 + community.general.pipx: + state: install + name: pycowsay + source: pycowsay==0.0.0.1 + + - name: Pin cowsay + community.general.pipx: + state: pin + name: pycowsay + register: pin_cow + + - name: Upgrade 
pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_1 + + - name: Unpin cowsay + community.general.pipx: + state: unpin + name: pycowsay + register: unpin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_2 + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert pin and unpin + ansible.builtin.assert: + that: + - pin_cow is changed + - cow_info_1.application[0].version == "0.0.0.1" + - unpin_cow is changed + - cow_info_2.application[0].version != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml new file mode 100644 index 0000000000..89e4bb9dc6 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml @@ -0,0 +1,69 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + state: uninstall + name: pycowsay + + # latest is 0.0.0.2 + - name: Install pycowsay 0.0.0.1 + community.general.pipx: + state: install + name: pycowsay + source: pycowsay==0.0.0.1 + + - name: Pin cowsay + community.general.pipx: + state: pin + name: pycowsay + register: pin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_1 + + - name: Unpin cowsay + 
community.general.pipx: + state: unpin + name: pycowsay + register: unpin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_2 + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert uninstall-all + ansible.builtin.assert: + that: + - pin_cow is changed + - cow_info_1 == "0.0.0.1" + - unpin_cow is changed + - cow_info_2 != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml index 60296024e4..63d33ba92c 100644 --- a/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml +++ b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml @@ -3,17 +3,17 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: ensure application pylint is uninstalled +- name: Ensure application pylint is uninstalled community.general.pipx: name: pylint state: absent -- name: install application pylint +- name: Install application pylint community.general.pipx: name: pylint register: install_pylint -- name: inject packages +- name: Inject packages community.general.pipx: state: inject name: pylint @@ -21,7 +21,7 @@ - licenses register: inject_pkgs_pylint -- name: inject packages with apps +- name: Inject packages with apps community.general.pipx: state: inject name: pylint @@ -30,13 +30,13 @@ install_apps: true register: inject_pkgs_apps_pylint -- name: cleanup pylint +- name: Cleanup pylint community.general.pipx: state: absent name: pylint register: uninstall_pylint -- name: check assertions inject_packages +- name: Check assertions inject_packages assert: that: - install_pylint is changed From 
26df6c765767d830ab431570a71859ba82dbfe51 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 9 Sep 2024 00:22:37 +1200 Subject: [PATCH 220/482] use dict comprehension in plugins, part 3 (#8833) * use dict comprehension in plugins, part 3 * add changelog frag --- .../fragments/8833-dict-comprehension.yml | 23 +++++++++++++++++++ plugins/cache/redis.py | 2 +- plugins/lookup/onepassword.py | 2 +- plugins/module_utils/ocapi_utils.py | 8 +++---- plugins/module_utils/redfish_utils.py | 7 +++--- plugins/module_utils/scaleway.py | 2 +- plugins/modules/alternatives.py | 2 +- plugins/modules/apache2_mod_proxy.py | 2 +- plugins/modules/consul_acl.py | 4 ++-- plugins/modules/imc_rest.py | 3 +-- plugins/modules/keycloak_user_federation.py | 7 ++++-- plugins/modules/pids.py | 2 +- plugins/modules/pipx.py | 4 +--- plugins/modules/pipx_info.py | 4 +--- plugins/modules/pkg5_publisher.py | 4 +--- plugins/modules/scaleway_compute.py | 10 ++++---- plugins/modules/scaleway_ip.py | 12 ++++------ plugins/modules/scaleway_lb.py | 10 ++++---- plugins/modules/scaleway_security_group.py | 6 ++--- plugins/modules/scaleway_user_data.py | 6 ++--- plugins/modules/sensu_silence.py | 2 +- plugins/modules/snmp_facts.py | 6 ++++- plugins/modules/sorcery.py | 2 +- .../plugins/module_utils/test_cmd_runner.py | 2 +- .../module_utils/test_python_runner.py | 10 ++++---- 25 files changed, 81 insertions(+), 61 deletions(-) create mode 100644 changelogs/fragments/8833-dict-comprehension.yml diff --git a/changelogs/fragments/8833-dict-comprehension.yml b/changelogs/fragments/8833-dict-comprehension.yml new file mode 100644 index 0000000000..1515609e69 --- /dev/null +++ b/changelogs/fragments/8833-dict-comprehension.yml @@ -0,0 +1,23 @@ +minor_changes: + - redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
+ - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
+ - scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index e01083e863..f96aafaa84 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -227,7 +227,7 @@ class CacheModule(BaseCacheModule): def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) + ret = {k: self.get(k) for k in self.keys()} return ret def __getstate__(self): diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index f9b8c6dfa3..921cf9acb8 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -135,7 +135,7 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): self._version = None def _check_required_params(self, required_params): - non_empty_attrs = dict((param, getattr(self, param, None)) for param in required_params if getattr(self, param, None)) + non_empty_attrs = {param: getattr(self, param) for param in required_params if getattr(self, param, None)} missing = set(required_params).difference(non_empty_attrs) if missing: prefix = "Unable to sign in to 1Password. 
Missing required parameter" diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py index 232c915060..8b8687199a 100644 --- a/plugins/module_utils/ocapi_utils.py +++ b/plugins/module_utils/ocapi_utils.py @@ -56,7 +56,7 @@ class OcapiUtils(object): follow_redirects='all', use_proxy=True, timeout=self.timeout) data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s'" @@ -86,7 +86,7 @@ class OcapiUtils(object): data = json.loads(to_native(resp.read())) else: data = "" - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s'" @@ -113,7 +113,7 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on PUT request to '%s'" @@ -144,7 +144,7 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout if timeout is None else timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s'" diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c79506075a..b7fdeb3a52 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -151,7 +151,7 @@ class RedfishUtils(object): 
force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=timeout, ciphers=self.ciphers) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} try: if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'): # Older versions of Ansible do not automatically decompress the data @@ -206,7 +206,7 @@ class RedfishUtils(object): except Exception as e: # No response data; this is okay in many cases data = None - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, @@ -610,8 +610,7 @@ class RedfishUtils(object): data = response['data'] if 'Parameters' in data: params = data['Parameters'] - ai = dict((p['Name'], p) - for p in params if 'Name' in p) + ai = {p['Name']: p for p in params if 'Name' in p} if not ai: ai = { k[:-24]: {'AllowableValues': v} diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 2b21ec3793..4768aafc9c 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -140,7 +140,7 @@ def resource_attributes_should_be_changed(target, wished, verifiable_mutable_att diff[attr] = wished[attr] if diff: - return dict((attr, wished[attr]) for attr in mutable_attributes) + return {attr: wished[attr] for attr in mutable_attributes} else: return diff diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py index 0d1b1e8cbe..da578276fa 100644 --- a/plugins/modules/alternatives.py +++ b/plugins/modules/alternatives.py @@ -344,7 +344,7 @@ class AlternativesModule(object): subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output)) if not subcmd_path_map and self.subcommands: - subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands) + subcmd_path_map = 
{s['name']: s['link'] for s in self.subcommands} for path, prio, subcmd in alternative_regex.findall(display_output): self.current_alternatives[path] = dict( diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py index 87e99bdd9a..786089d13c 100644 --- a/plugins/modules/apache2_mod_proxy.py +++ b/plugins/modules/apache2_mod_proxy.py @@ -277,7 +277,7 @@ class BalancerMember(object): for valuesset in subsoup[1::1]: if re.search(pattern=self.host, string=str(valuesset)): values = valuesset.findAll('td') - return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) + return {keys[x].string: values[x].string for x in range(0, len(keys))} def get_member_status(self): """ Returns a dictionary of a balancer member's status attributes.""" diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py index 4617090fd3..2d60af0625 100644 --- a/plugins/modules/consul_acl.py +++ b/plugins/modules/consul_acl.py @@ -273,8 +273,8 @@ def set_acl(consul_client, configuration): :return: the output of setting the ACL """ acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) + existing_acls_mapped_by_name = {acl.name: acl for acl in acls_as_json if acl.name is not None} + existing_acls_mapped_by_token = {acl.token: acl for acl in acls_as_json} if None in existing_acls_mapped_by_token: raise AssertionError("expecting ACL list to be associated to a token: %s" % existing_acls_mapped_by_token[None]) diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index 7f5a5e0814..946dfe7f10 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -323,8 +323,7 @@ def merge(one, two): ''' Merge two complex nested datastructures into one''' if isinstance(one, dict) and isinstance(two, dict): copy = dict(one) - # 
copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) + copy.update({key: merge(one.get(key, None), two[key]) for key in two}) return copy elif isinstance(one, list) and isinstance(two, list): diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 3f65af3a93..90760f7ce9 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -856,8 +856,11 @@ def main(): if mappers is not None: for mapper in mappers: if mapper.get('config') is not None: - mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in mapper['config'].items() if mapper['config'][k] is not None) + mapper['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in mapper['config'].items() + if mapper['config'][k] is not None + } # Filter and map the parameters names that apply comp_params = [x for x in module.params diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py index 590f1e85a5..99b52ef1dd 100644 --- a/plugins/modules/pids.py +++ b/plugins/modules/pids.py @@ -111,7 +111,7 @@ class PSAdapter(object): attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) def _get_proc_attributes(self, proc, *attributes): - return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes) + return {attribute: self._get_attribute_from_proc(proc, attribute) for attribute in attributes} @staticmethod @abc.abstractmethod diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 38efc56ffc..4793dd49ea 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -280,9 +280,7 @@ class PipX(StateModuleHelper): for venv_name, venv in raw_data['venvs'].items(): results[venv_name] = { 'version': venv['metadata']['main_package']['package_version'], - 'injected': dict( - (k, 
v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ), + 'injected': {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}, } return results diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index dee3125da2..816729f9a6 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -196,9 +196,7 @@ class PipXInfo(ModuleHelper): 'version': venv['metadata']['main_package']['package_version'] } if self.vars.include_injected: - entry['injected'] = dict( - (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ) + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} if self.vars.include_deps: entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) results.append(entry) diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py index 9d1b381385..6d07e455f4 100644 --- a/plugins/modules/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -183,9 +183,7 @@ def get_publishers(module): name = values['publisher'] if name not in publishers: - publishers[name] = dict( - (k, values[k]) for k in ['sticky', 'enabled'] - ) + publishers[name] = {k: values[k] for k in ['sticky', 'enabled']} publishers[name]['origin'] = [] publishers[name]['mirror'] = [] diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index 58a3215056..d8480c199d 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -586,9 +586,11 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve compute_api.module.debug("Checking if server attributes should be changed") compute_api.module.debug("Current Server: %s" % target_server) compute_api.module.debug("Wished Server: %s" % wished_server) - debug_dict = dict((x, (target_server[x], wished_server[x])) - for x in 
PATCH_MUTABLE_SERVER_ATTRIBUTES - if x in target_server and x in wished_server) + debug_dict = { + x: (target_server[x], wished_server[x]) + for x in PATCH_MUTABLE_SERVER_ATTRIBUTES + if x in target_server and x in wished_server + } compute_api.module.debug("Debug dict %s" % debug_dict) try: for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: @@ -614,7 +616,7 @@ def server_change_attributes(compute_api, target_server, wished_server): # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: # Setting all key to current value except ID - key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id") + key_dict = {x: target_server[key][x] for x in target_server[key].keys() if x != "id"} # Setting ID to the user specified ID key_dict["id"] = wished_server[key] patch_payload[key] = key_dict diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py index 1c9042742b..79f0c7e3fb 100644 --- a/plugins/modules/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -145,11 +145,11 @@ def ip_attributes_should_be_changed(api, target_ip, wished_ip): def payload_from_wished_ip(wished_ip): - return dict( - (k, v) + return { + k: v for k, v in wished_ip.items() if k != 'id' and v is not None - ) + } def present_strategy(api, wished_ip): @@ -161,8 +161,7 @@ def present_strategy(api, wished_ip): response.status_code, response.json['message'])) ips_list = response.json["ips"] - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if wished_ip["id"] not in ip_lookup.keys(): changed = True @@ -212,8 +211,7 @@ def absent_strategy(api, wished_ip): api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( status_code, response.json['message'])) - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if 
wished_ip["id"] not in ip_lookup.keys(): return changed, {} diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 5bd16c3f4e..1083b6da9e 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -224,10 +224,10 @@ def wait_to_complete_state_transition(api, lb, force_wait=False): def lb_attributes_should_be_changed(target_lb, wished_lb): - diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) + diff = {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]} if diff: - return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) + return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES} else: return diff @@ -241,8 +241,7 @@ def present_strategy(api, wished_lb): response.status_code, response.json['message'])) lbs_list = response.json["lbs"] - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): changed = True @@ -298,8 +297,7 @@ def absent_strategy(api, wished_lb): api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( status_code, response.json['message'])) - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): return changed, {} diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py index 0c7f3f6ec6..3aee99e99a 100644 --- a/plugins/modules/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -149,8 +149,7 @@ def present_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = 
{sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): ret['changed'] = True @@ -181,8 +180,7 @@ def absent_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): return ret diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py index 08ff86a55e..601231def9 100644 --- a/plugins/modules/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -129,10 +129,10 @@ def core(module): compute_api.module.fail_json(msg=msg) present_user_data_keys = user_data_list.json["user_data"] - present_user_data = dict( - (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key)) + present_user_data = { + key: get_user_data(compute_api=compute_api, server_id=server_id, key=key) for key in present_user_data_keys - ) + } if present_user_data == user_data: module.exit_json(changed=changed, msg=user_data_list.json) diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py index 14c664755d..25dfc239eb 100644 --- a/plugins/modules/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -149,7 +149,7 @@ def clear(module, url, check, subscription): # Test if silence exists before clearing (rc, out, changed) = query(module, url, check, subscription) - d = dict((i['subscription'], i['check']) for i in out) + d = {i['subscription']: i['check'] for i in out} subscription_exists = subscription in d if check and subscription_exists: exists = (check == d[subscription]) diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index 39c75bcd93..d561f93f02 
100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -300,7 +300,11 @@ def main(): deps.validate(module) cmdGen = cmdgen.CommandGenerator() - transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) + transport_opts = { + k: m_args[k] + for k in ('timeout', 'retries') + if m_args[k] is not None + } # Verify that we receive a community when using snmp v2 if m_args['version'] in ("v2", "v2c"): diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py index 4fcf46a052..a525bd9ac8 100644 --- a/plugins/modules/sorcery.py +++ b/plugins/modules/sorcery.py @@ -280,7 +280,7 @@ def codex_list(module, skip_new=False): # return only specified grimoires unless requested to skip new if params['repository'] and not skip_new: - codex = dict((x, codex.get(x, NA)) for x in params['name']) + codex = {x: codex.get(x, NA) for x in params['name']} if not codex: module.fail_json(msg="no grimoires to operate on; add at least one") diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index 8cee57b01e..da93292197 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -359,7 +359,7 @@ def test_runner_context(runner_input, cmd_execution, expected): ) def _assert_run_info(actual, expected): - reduced = dict((k, actual[k]) for k in expected.keys()) + reduced = {k: actual[k] for k in expected.keys()} assert reduced == expected, "{0}".format(reduced) def _assert_run(runner_input, cmd_execution, expected, ctx, results): diff --git a/tests/unit/plugins/module_utils/test_python_runner.py b/tests/unit/plugins/module_utils/test_python_runner.py index 015065bdd4..8572ee7d78 100644 --- a/tests/unit/plugins/module_utils/test_python_runner.py +++ b/tests/unit/plugins/module_utils/test_python_runner.py @@ -189,9 +189,11 @@ def test_runner_context(runner_input, cmd_execution, expected): def 
_extract_path(run_info): path = run_info.get("environ_update", {}).get("PATH") if path is not None: - run_info["environ_update"] = dict((k, v) - for k, v in run_info["environ_update"].items() - if k != "PATH") + run_info["environ_update"] = { + k: v + for k, v in run_info["environ_update"].items() + if k != "PATH" + } return run_info, path def _assert_run_info_env_path(actual, expected): @@ -199,7 +201,7 @@ def test_runner_context(runner_input, cmd_execution, expected): assert expected in actual2, "Missing expected path {0} in output PATH: {1}".format(expected, actual) def _assert_run_info(actual, expected): - reduced = dict((k, actual[k]) for k in expected.keys()) + reduced = {k: actual[k] for k in expected.keys()} reduced, act_path = _extract_path(reduced) expected, exp_path = _extract_path(expected) if exp_path is not None: From 982b8d89b7373d4098020b1491ff3d5478fa9730 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Sun, 8 Sep 2024 14:23:27 +0200 Subject: [PATCH 221/482] keycloak_user_federation: sort desired and after mappers by name (#8761) * sort desired mappers by name * sort mappers fetched after update by name * only sort mapper list if there are desired mappers specified * add fallback `''` in case `name` is not a key or `None` when sorting mappers * add changelog fragment --- ...ser_federation-sort-desired-and-after-mappers-by-name.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml diff --git a/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml b/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml new file mode 100644 index 0000000000..2d7d39345f --- /dev/null +++ b/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml @@ -0,0 +1,2 @@ +bugfixes: + - 
keycloak_user_federation - sort desired and after mapper list by name (analog to before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 90760f7ce9..f80d694e07 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -883,7 +883,7 @@ def main(): # if user federation exists, get associated mappers if cid is not None and before_comp: - before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name')) + before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') # Build a proposed changeset from parameters given to this module changeset = {} @@ -924,6 +924,7 @@ def main(): if changeset.get('mappers') is None: changeset['mappers'] = list() changeset['mappers'].append(new_mapper) + changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('name') or '') # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: @@ -1039,7 +1040,7 @@ def main(): kc.create_component(mapper, realm) after_comp = kc.get_component(cid, realm) - after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') after_comp_sanitized = sanitize(after_comp) before_comp_sanitized = sanitize(before_comp) result['end_state'] = after_comp_sanitized From d9b0c42f5f9e6dfb0303071faab92fc20face84b Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Sun, 8 Sep 2024 15:24:33 +0300 Subject: [PATCH 
222/482] Add one_vnet module (#8769) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add one_vnet module * Add one_vnet integration tests * Update BOTMETA.yml * Update aliases --------- Co-authored-by: Александр Бакановский --- .github/BOTMETA.yml | 2 + plugins/modules/one_vnet.py | 434 ++++++++++++++++++ tests/integration/targets/one_vnet/aliases | 7 + .../targets/one_vnet/tasks/main.yml | 173 +++++++ 4 files changed, 616 insertions(+) create mode 100644 plugins/modules/one_vnet.py create mode 100644 tests/integration/targets/one_vnet/aliases create mode 100644 tests/integration/targets/one_vnet/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fe0c1a62b6..fb8c3cb113 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -974,6 +974,8 @@ files: maintainers: $team_opennebula $modules/one_host.py: maintainers: rvalle + $modules/one_vnet.py: + maintainers: abakanovskii $modules/oneandone_: maintainers: aajdinov edevenport $modules/onepassword_info.py: diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py new file mode 100644 index 0000000000..93523f8b4f --- /dev/null +++ b/plugins/modules/one_vnet.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024, Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_vnet +short_description: Manages OpenNebula virtual networks +version_added: 9.4.0 +author: "Alexander Bakanovskii (@abakanovskii)" +requirements: + - pyone +description: + - Manages virtual networks in OpenNebula. 
+attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually change. + diff_mode: + support: none +options: + id: + description: + - A O(id) of the network you would like to manage. + - If not set then a new network will be created with the given O(name). + type: int + name: + description: + - A O(name) of the network you would like to manage. If a network with + the given name does not exist it will be created, otherwise it will be + managed by this module. + type: str + template: + description: + - A string containing the network template contents. + type: str + state: + description: + - V(present) - state that is used to manage the network. + - V(absent) - delete the network. + choices: ["present", "absent"] + default: present + type: str + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Make sure the network is present by ID + community.general.one_vnet: + id: 0 + state: present + register: result + +- name: Make sure the network is present by name + community.general.one_vnet: + name: opennebula-bridge + state: present + register: result + +- name: Create a new or update an existing network + community.general.one_vnet: + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = 192.0.2.50, + SIZE = "20" + ] + DNS = 192.0.2.1 + GATEWAY = 192.0.2.1 + +- name: Delete the network by ID + community.general.one_vnet: + id: 0 + state: absent +''' + +RETURN = ''' +id: + description: The network id. + type: int + returned: when O(state=present) + sample: 153 +name: + description: The network name. + type: str + returned: when O(state=present) + sample: app1 +template: + description: The parsed network template. 
+ type: dict + returned: when O(state=present) + sample: + BRIDGE: onebr.1000 + BRIDGE_TYPE: linux + DESCRIPTION: sampletext + PHYDEV: eth0 + SECURITY_GROUPS: 0 + VLAN_ID: 1000 + VN_MAD: 802.1Q +user_id: + description: The network's user name. + type: int + returned: when O(state=present) + sample: 1 +user_name: + description: The network's user id. + type: str + returned: when O(state=present) + sample: oneadmin +group_id: + description: The network's group id. + type: int + returned: when O(state=present) + sample: 1 +group_name: + description: The network's group name. + type: str + returned: when O(state=present) + sample: one-users +owner_id: + description: The network's owner id. + type: int + returned: when O(state=present) + sample: 143 +owner_name: + description: The network's owner name. + type: str + returned: when O(state=present) + sample: ansible-test +permissions: + description: The network's permissions. + type: dict + returned: when O(state=present) + contains: + owner_u: + description: The network's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The network's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The network's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The network's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The network's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The network's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The network's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The network's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The network's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +clusters: + description: The network's clusters. 
+ type: list + returned: when O(state=present) + sample: [0, 100] +bridge: + description: The network's bridge interface. + type: str + returned: when O(state=present) + sample: br0 +bridge_type: + description: The network's bridge type. + type: str + returned: when O(state=present) + sample: linux +parent_network_id: + description: The network's parent network id. + type: int + returned: when O(state=present) + sample: 1 +vm_mad: + description: The network's VM_MAD. + type: str + returned: when O(state=present) + sample: bridge +phydev: + description: The network's physical device (NIC). + type: str + returned: when O(state=present) + sample: eth0 +vlan_id: + description: The network's VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +outer_vlan_id: + description: The network's outer VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +vrouters: + description: The network's list of virtual routers IDs. + type: list + returned: when O(state=present) + sample: [0, 1] +ar_pool: + description: The network's list of ar_pool. 
+ type: list + returned: when O(state=present) + sample: + - ar_id: 0 + ip: 192.0.2.1 + mac: 6c:1e:46:01:cd:d1 + size: 20 + type: IP4 + - ar_id: 1 + allocated: 0 + ip: 198.51.100.1 + mac: 5d:9b:c0:9e:f6:e5 + size: 20 + type: IP4 +''' + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class NetworksModule(OpenNebulaModule): + + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str', required=False), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + def get_template(self, predicate): + # -2 means "Resources belonging to all users" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.vnpool.info(-2, -1, -1) + + for template in pool.VMTEMPLATE: + if predicate(template): + return template + + return None + + 
def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_networks_ar_pool(self, template): + ar_pool = [] + for ar in template.AR_POOL: + ar_pool.append({ + # These params will always be present + 'ar_id': ar['AR_ID'], + 'mac': ar['MAC'], + 'size': ar['SIZE'], + 'type': ar['TYPE'], + # These are optional so firstly check for presence + # and if not present set value to Null + 'allocated': getattr(ar, 'ALLOCATED', 'Null'), + 'ip': getattr(ar, 'IP', 'Null'), + 'global_prefix': getattr(ar, 'GLOBAL_PREFIX', 'Null'), + 'parent_network_ar_id': getattr(ar, 'PARENT_NETWORK_AR_ID', 'Null'), + 'ula_prefix': getattr(ar, 'ULA_PREFIX', 'Null'), + 'vn_mad': getattr(ar, 'VN_MAD', 'Null'), + }) + return ar_pool + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + 'permissions': { + 'owner_u': template.PERMISSIONS.OWNER_U, + 'owner_m': template.PERMISSIONS.OWNER_M, + 'owner_a': template.PERMISSIONS.OWNER_A, + 'group_u': template.PERMISSIONS.GROUP_U, + 'group_m': template.PERMISSIONS.GROUP_M, + 'group_a': template.PERMISSIONS.GROUP_A, + 'other_u': template.PERMISSIONS.OTHER_U, + 'other_m': template.PERMISSIONS.OTHER_M, + 'other_a': template.PERMISSIONS.OTHER_A + }, + 'clusters': template.CLUSTERS.ID, + 'bridge': template.BRIDGE, + 'bride_type': template.BRIDGE_TYPE, + 'parent_network_id': template.PARENT_NETWORK_ID, + 'vm_mad': template.VM_MAD, + 'phydev': template.PHYDEV, + 'vlan_id': template.VLAN_ID, + 
'outer_vlan_id': template.OUTER_VLAN_ID, + 'used_leases': template.USED_LEASES, + 'vrouters': template.VROUTERS.ID, + 'ar_pool': self.get_networks_ar_pool(template) + } + + return info + + def create_template(self, name, template_data): + if not self.module.check_mode: + self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data) + + result = self.get_template_info(self.get_template_by_name(name)) + result['changed'] = True + + return result + + def update_template(self, template, template_data): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.vn.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID)) + if self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. + result['changed'] = True + else: + # if the previous parsed template data is not equal to the updated one, this has changed + result['changed'] = template.TEMPLATE != result['template'] + + return result + + def delete_template(self, template): + if not template: + return {'changed': False} + + if not self.module.check_mode: + self.one.vn.delete(template.ID) + + return {'changed': True} + + +def main(): + NetworksModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/one_vnet/aliases b/tests/integration/targets/one_vnet/aliases new file mode 100644 index 0000000000..100ba0f979 --- /dev/null +++ b/tests/integration/targets/one_vnet/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI! 
diff --git a/tests/integration/targets/one_vnet/tasks/main.yml b/tests/integration/targets/one_vnet/tasks/main.yml new file mode 100644 index 0000000000..084d4758ad --- /dev/null +++ b/tests/integration/targets/one_vnet/tasks/main.yml @@ -0,0 +1,173 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Create a new template +- name: Create a new network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.1" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is created + assert: + that: + - result is changed + + +# Updating a network +- name: Update an existing network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is changed + assert: + that: + - result is changed + +# Testing idempotence using the same template as in previous task +- name: Update an existing network with the same changes again + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: 
bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is not changed + assert: + that: + - result is not changed + + +# Deletion of networks +- name: Delete a nonexisting network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: i-do-not-exists + state: absent + register: result + +- name: Assert that network is not changed + assert: + that: + - result is not changed + +- name: Delete an existing network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + state: absent + register: result + +- name: Assert that network was deleted + assert: + that: + - result is changed + +# Trying to run with wrong arguments +- name: Try to create use network with state=present and without the template parameter + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + state: present + register: result + ignore_errors: true + +- name: Assert that it failed because network is missing + assert: + that: + - result is failed + +- name: Try to create network with template but without specifying the name parameter + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: present + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + ignore_errors: true + +- name: Assert that it failed because name is required for initial creation + assert: + that: + - result is failed 
+ +- name: Try to use both ID and name at the same time + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: + id: 0 + state: present + register: result + ignore_errors: true + +- name: Assert that it failed because you can use only one at the time + assert: + that: + - result is failed From ea389e70452f30cc8f800ed2bcc4f9eb36559bf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20B=C3=B6sel?= Date: Sun, 8 Sep 2024 14:25:33 +0200 Subject: [PATCH 223/482] bugfix(keycloak): fix parameters for realm key creation (#8823) * bugfix(keycloak): fix parameters for realm key creation * supply changelog fragment * fix formatting * fix formatting * Update changelogs/fragments/8823-keycloak-realm-key.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/8823-keycloak-realm-key.yml | 2 + plugins/modules/keycloak_realm_key.py | 6 +- .../modules/test_keycloak_realm_keys.py | 380 ++++++++++++++++++ 3 files changed, 385 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8823-keycloak-realm-key.yml create mode 100644 tests/unit/plugins/modules/test_keycloak_realm_keys.py diff --git a/changelogs/fragments/8823-keycloak-realm-key.yml b/changelogs/fragments/8823-keycloak-realm-key.yml new file mode 100644 index 0000000000..4c0e591f8e --- /dev/null +++ b/changelogs/fragments/8823-keycloak-realm-key.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py index 6e762fba9d..edc8a6068e 100644 --- a/plugins/modules/keycloak_realm_key.py +++ b/plugins/modules/keycloak_realm_key.py @@ -68,7 +68,7 @@ options: type: bool parent_id: description: - - The parent_id of the realm key. In practice the ID (name) of the realm. + - The parent_id of the realm key. In practice the name of the realm. type: str required: true provider_id: @@ -300,7 +300,7 @@ def main(): kc = KeycloakAPI(module, connection_header) - params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force"] + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force", "parent_id"] # Filter and map the parameters names that apply to the role component_params = [x for x in module.params @@ -371,7 +371,7 @@ def main(): parent_id = module.params.get('parent_id') # Get a list of all Keycloak components that are of keyprovider type. - realm_keys = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id) + realm_keys = kc.get_components(urlencode(dict(type=provider_type)), parent_id) # If this component is present get its key ID. Confusingly the key ID is # also known as the Provider ID. 
diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys.py b/tests/unit/plugins/modules/test_keycloak_realm_keys.py new file mode 100644 index 0000000000..628fa54f31 --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules import keycloak_realm_key + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None): + """Mock context manager for patching the methods in KeycloakAPI + """ + + obj = keycloak_realm_key.KeycloakAPI + with patch.object(obj, 'get_components', side_effect=get_components) \ + as mock_get_components: + with patch.object(obj, 'get_component', side_effect=get_component) \ + as mock_get_component: + with patch.object(obj, 'create_component', side_effect=create_component) \ + as mock_create_component: + with patch.object(obj, 'update_component', side_effect=update_component) \ + as mock_update_component: + with patch.object(obj, 'delete_component', side_effect=delete_component) \ + as mock_delete_component: + yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + + +def 
get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealmKeys(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmKeys, self).setUp() + self.module = keycloak_realm_key + + def test_create_when_absent(self): + """Add a new realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'present', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 
'RS256', + 'certificate': 'foo', + }, + } + return_value_component_create = [ + { + "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9", + "name": "testkey", + "providerId": "rsa", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + } + ] + # get before_comp, get default_mapper, get after_mapper + return_value_components_get = [ + [], [], [] + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 1) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # must not contain parent_id + mock_create_component.assert_called_once_with({ + 'name': 'testkey', + 'providerId': 'rsa', + 'providerType': 'org.keycloak.keys.KeyProvider', + 'config': { + 'priority': ['0'], + 'enabled': ['true'], + 'privateKey': ['privatekey'], + 'algorithm': ['RS256'], + 'certificate': ['foo'], + 'active': ['true'], + }, + }, 'realm-name') + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', 
+ 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'present', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + return_value_components_get = [ + [ + + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "testkey", + "providerId": "rsa", + "providerType": "org.keycloak.keys.KeyProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + }, + ], + [], + [] + ] + return_value_component_update = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 1) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'absent', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 
'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + return_value_components_get = [ + [] + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'absent', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + + return_value_components_get = [ + [ + + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "testkey", + "providerId": "rsa", + "providerType": "org.keycloak.keys.KeyProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + }, + ], + [], + [] + ] + return_value_component_delete = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with 
mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From afd754e384118768c03f0acee3849900827464cc Mon Sep 17 00:00:00 2001 From: Samuel-BF <36996277+Samuel-BF@users.noreply.github.com> Date: Sun, 8 Sep 2024 14:26:04 +0200 Subject: [PATCH 224/482] (doc) random_string lookup: fix examples (#8827) --- plugins/lookup/random_string.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index d3b29629d7..9b811dd8b3 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -104,37 +104,37 @@ EXAMPLES = r""" - name: Generate random string ansible.builtin.debug: var: lookup('community.general.random_string') - # Example result: ['DeadBeeF'] + # Example result: 'DeadBeeF' - name: Generate random string with length 12 ansible.builtin.debug: var: lookup('community.general.random_string', length=12) - # Example result: ['Uan0hUiX5kVG'] + # Example result: 'Uan0hUiX5kVG' - name: Generate base64 encoded random string ansible.builtin.debug: var: lookup('community.general.random_string', base64=True) - # Example result: ['NHZ6eWN5Qk0='] + # Example result: 'NHZ6eWN5Qk0=' - name: Generate a random 
string with 1 lower, 1 upper, 1 number and 1 special char (at least) ansible.builtin.debug: var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) - # Example result: ['&Qw2|E[-'] + # Example result: '&Qw2|E[-' - name: Generate a random string with all lower case characters - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, numbers=false, special=false) # Example result: ['exolxzyz'] - name: Generate random hexadecimal string - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) vars: hex_chars: '0123456789ABCDEF' # Example result: ['D2A40737'] - name: Generate random hexadecimal string with override_all - debug: + ansible.builtin.debug: var: query('community.general.random_string', override_all=hex_chars) vars: hex_chars: '0123456789ABCDEF' From d73f977b7ad0f0b2d1fce9861e80e5a197b8afd9 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 8 Sep 2024 16:16:14 +0200 Subject: [PATCH 225/482] Remove link to Google Groups mailing list (#8843) Remove link to Google Groups mailing list. 
Ref: https://groups.google.com/g/ansible-project/c/B0oKR0aQqXs --- docs/docsite/links.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index 32d56eacc6..fe41d1d2fd 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -24,9 +24,6 @@ communication: - topic: General usage and support questions network: Libera channel: '#ansible' - mailing_lists: - - topic: Ansible Project List - url: https://groups.google.com/g/ansible-project forums: - topic: "Ansible Forum: General usage and support questions" # The following URL directly points to the "Get Help" section From 529af4984c508fdd2295cc3103adad4edd2bafe9 Mon Sep 17 00:00:00 2001 From: Eike Waldt Date: Mon, 9 Sep 2024 13:51:07 +0200 Subject: [PATCH 226/482] keycloak_userprofile: new module (#8651) keycloak_userprofile: new keycloak module to manage user profiles (#8651) --- .github/BOTMETA.yml | 2 + plugins/modules/keycloak_userprofile.py | 732 +++++++++++++++ .../targets/keycloak_group/tasks/main.yml | 4 +- .../targets/keycloak_userprofile/aliases | 5 + .../keycloak_userprofile/meta/main.yml | 7 + .../targets/keycloak_userprofile/readme.adoc | 27 + .../keycloak_userprofile/tasks/main.yml | 301 ++++++ .../keycloak_userprofile/vars/main.yml | 111 +++ .../modules/test_keycloak_userprofile.py | 866 ++++++++++++++++++ 9 files changed, 2053 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/keycloak_userprofile.py create mode 100644 tests/integration/targets/keycloak_userprofile/aliases create mode 100644 tests/integration/targets/keycloak_userprofile/meta/main.yml create mode 100644 tests/integration/targets/keycloak_userprofile/readme.adoc create mode 100644 tests/integration/targets/keycloak_userprofile/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_userprofile/vars/main.yml create mode 100644 tests/unit/plugins/modules/test_keycloak_userprofile.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 
fb8c3cb113..bc34755b31 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -809,6 +809,8 @@ files: maintainers: elfelip $modules/keycloak_user_federation.py: maintainers: laurpaum + $modules/keycloak_userprofile.py: + maintainers: yeoldegrove $modules/keycloak_component_info.py: maintainers: desand01 $modules/keycloak_client_rolescope.py: diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py new file mode 100644 index 0000000000..ba5dc127d2 --- /dev/null +++ b/plugins/modules/keycloak_userprofile.py @@ -0,0 +1,732 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_userprofile + +short_description: Allows managing Keycloak User Profiles + +description: + - This module allows you to create, update, or delete Keycloak User Profiles via Keycloak API. You can also customize the "Unmanaged Attributes" with it. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). + For compatibility reasons, the module also accepts the camelCase versions of the options. + +version_added: "9.4.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the User Profile provider. + - On V(present), the User Profile provider will be created if it does not yet exist, or updated with + the parameters you provide. + - On V(absent), the User Profile provider will be removed if it exists. 
+ default: 'present' + type: str + choices: + - present + - absent + + parent_id: + description: + - The parent ID of the realm key. In practice the ID (name) of the realm. + aliases: + - parentId + - realm + type: str + required: true + + provider_id: + description: + - The name of the provider ID for the key (supported value is V(declarative-user-profile)). + aliases: + - providerId + choices: ['declarative-user-profile'] + default: 'declarative-user-profile' + type: str + + provider_type: + description: + - Component type for User Profile (only supported value is V(org.keycloak.userprofile.UserProfileProvider)). + aliases: + - providerType + choices: ['org.keycloak.userprofile.UserProfileProvider'] + default: org.keycloak.userprofile.UserProfileProvider + type: str + + config: + description: + - The configuration of the User Profile Provider. + type: dict + required: false + suboptions: + kc_user_profile_config: + description: + - Define a declarative User Profile. See EXAMPLES for more context. + aliases: + - kcUserProfileConfig + type: list + elements: dict + suboptions: + attributes: + description: + - A list of attributes to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the attribute. + type: str + required: true + + display_name: + description: + - The display name of the attribute. + aliases: + - displayName + type: str + required: true + + validations: + description: + - The validations to be applied to the attribute. + type: dict + suboptions: + length: + description: + - The length validation for the attribute. + type: dict + suboptions: + min: + description: + - The minimum length of the attribute. + type: int + max: + description: + - The maximum length of the attribute. + type: int + required: true + + email: + description: + - The email validation for the attribute. 
+ type: dict + + username_prohibited_characters: + description: + - The prohibited characters validation for the username attribute. + type: dict + aliases: + - usernameProhibitedCharacters + + up_username_not_idn_homograph: + description: + - The validation to prevent IDN homograph attacks in usernames. + type: dict + aliases: + - upUsernameNotIdnHomograph + + person_name_prohibited_characters: + description: + - The prohibited characters validation for person name attributes. + type: dict + aliases: + - personNameProhibitedCharacters + + uri: + description: + - The URI validation for the attribute. + type: dict + + pattern: + description: + - The pattern validation for the attribute using regular expressions. + type: dict + + options: + description: + - Validation to ensure the attribute matches one of the provided options. + type: dict + + annotations: + description: + - Annotations for the attribute. + type: dict + + group: + description: + - Specifies the User Profile group where this attribute will be added. + type: str + + permissions: + description: + - The permissions for viewing and editing the attribute. + type: dict + suboptions: + view: + description: + - The roles that can view the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + edit: + description: + - The roles that can edit the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + multivalued: + description: + - Whether the attribute can have multiple values. + type: bool + default: false + + required: + description: + - The roles that require this attribute. + type: dict + suboptions: + roles: + description: + - The roles for which this attribute is required. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - user + + groups: + description: + - A list of attribute groups to be included in the User Profile. 
+ type: list + elements: dict + suboptions: + name: + description: + - The name of the group. + type: str + required: true + + display_header: + description: + - The display header for the group. + aliases: + - displayHeader + type: str + required: true + + display_description: + description: + - The display description for the group. + aliases: + - displayDescription + type: str + required: false + + annotations: + description: + - The annotations included in the group. + type: dict + required: false + + unmanaged_attribute_policy: + description: + - Policy for unmanaged attributes. + aliases: + - unmanagedAttributePolicy + type: str + choices: + - ENABLED + - ADMIN_EDIT + - ADMIN_VIEW + +notes: + - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keyckoak API). + However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries. + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Eike Waldt (@yeoldegrove) +''' + +EXAMPLES = ''' +- name: Create a Declarative User Profile with default settings + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - attributes: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + username_prohibited_characters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - 
name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + groups: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata + annotations: {} + +- name: Delete a Keycloak User Profile Provider + keycloak_userprofile: + state: absent + parent_id: master + +# Unmanaged attributes are user attributes not explicitly defined in the User Profile +# configuration. By default, unmanaged attributes are "Disabled" and are not +# available from any context such as registration, account, and the +# administration console. By setting "Enabled", unmanaged attributes are fully +# recognized by the server and accessible through all contexts, useful if you are +# starting migrating an existing realm to the declarative User Profile +# and you don't have yet all user attributes defined in the User Profile configuration. +- name: Enable Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + +# By setting "Only administrators can write", unmanaged attributes can be managed +# only through the administration console and API, useful if you have already +# defined any custom attribute that can be managed by users but you are unsure +# about adding other attributes that should only be managed by administrators. +- name: Enable ADMIN_EDIT on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + +# By setting `Only administrators can view`, unmanaged attributes are read-only +# and only available through the administration console and API. 
+- name: Enable ADMIN_VIEW on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW +''' + +RETURN = ''' +msg: + description: The output message generated by the module. + returned: always + type: str + sample: UserProfileProvider created successfully +data: + description: The data returned by the Keycloak API. + returned: when state is present + type: dict + sample: {...} +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from copy import deepcopy +import json + + +def remove_null_values(data): + if isinstance(data, dict): + # Recursively remove null values from dictionaries + return {k: remove_null_values(v) for k, v in data.items() if v is not None} + elif isinstance(data, list): + # Recursively remove null values from lists + return [remove_null_values(item) for item in data if item is not None] + else: + # Return the data if it's neither a dictionary nor a list + return data + + +def camel_recursive(data): + if isinstance(data, dict): + # Convert keys to camelCase and apply recursively + return {camel(k): camel_recursive(v) for k, v in data.items()} + elif isinstance(data, list): + # Apply camelCase conversion to each item in the list + return [camel_recursive(item) for item in data] + else: + # Return the data as is if it's not a dict or list + return data + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + parent_id=dict(type='str', aliases=['parentId', 'realm'], required=True), + provider_id=dict(type='str', aliases=['providerId'], default='declarative-user-profile', 
choices=['declarative-user-profile']), + provider_type=dict( + type='str', + aliases=['providerType'], + default='org.keycloak.userprofile.UserProfileProvider', + choices=['org.keycloak.userprofile.UserProfileProvider'] + ), + config=dict( + type='dict', + required=False, + options={ + 'kc_user_profile_config': dict( + type='list', + aliases=['kcUserProfileConfig'], + elements='dict', + options={ + 'attributes': dict( + type='list', + elements='dict', + required=False, + options={ + 'name': dict(type='str', required=True), + 'display_name': dict(type='str', aliases=['displayName'], required=True), + 'validations': dict( + type='dict', + options={ + 'length': dict( + type='dict', + options={ + 'min': dict(type='int', required=False), + 'max': dict(type='int', required=True) + } + ), + 'email': dict(type='dict', required=False), + 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters'], required=False), + 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph'], required=False), + 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters'], required=False), + 'uri': dict(type='dict', required=False), + 'pattern': dict(type='dict', required=False), + 'options': dict(type='dict', required=False) + } + ), + 'annotations': dict(type='dict'), + 'group': dict(type='str'), + 'permissions': dict( + type='dict', + options={ + 'view': dict(type='list', elements='str', default=['admin', 'user']), + 'edit': dict(type='list', elements='str', default=['admin', 'user']) + } + ), + 'multivalued': dict(type='bool', default=False), + 'required': dict( + type='dict', + options={ + 'roles': dict(type='list', elements='str', default=['user']) + } + ) + } + ), + 'groups': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 'display_header': dict(type='str', aliases=['displayHeader'], required=True), + 'display_description': 
dict(type='str', aliases=['displayDescription'], required=False), + 'annotations': dict(type='dict', required=False) + } + ), + 'unmanaged_attribute_policy': dict( + type='str', + aliases=['unmanagedAttributePolicy'], + choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'], + required=False + ) + } + ) + } + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + # Initialize the result object. Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm userprofile if it is already + # present. This is only used for diff-mode. + before_realm_userprofile = {} + before_realm_userprofile['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] + + # Filter and map the parameters names that apply to the role + component_params = [ + x + for x in module.params + if x not in params_to_ignore and module.params.get(x) is not None + ] + + # Build a proposed changeset from parameters given to this module + changeset = {} + + # Build the changeset with proper JSON serialization for kc_user_profile_config + config = module.params.get('config') + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). 
+ # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example proider_id + # becomes providerId. It also handles some special cases, e.g. aliases. + for component_param in component_params: + # realm/parent_id parameter + if component_param == 'realm' or component_param == 'parent_id': + changeset['parent_id'] = module.params.get(component_param) + changeset.pop(component_param, None) + # complex parameters in config suboptions + elif component_param == 'config': + for config_param in config: + # special parameter kc_user_profile_config + if config_param in ('kcUserProfileConfig', 'kc_user_profile_config'): + config_param_org = config_param + # rename parameter to be accepted by Keycloak API + config_param = 'kc.user.profile.config' + # make sure no null values are passed to Keycloak API + kc_user_profile_config = remove_null_values(config[config_param_org]) + changeset[camel(component_param)][config_param] = [] + if len(kc_user_profile_config) > 0: + # convert aliases to camelCase + kc_user_profile_config = camel_recursive(kc_user_profile_config) + # rename validations to be accepted by Keycloak API + if 'attributes' in kc_user_profile_config[0]: + for attribute in kc_user_profile_config[0]['attributes']: + if 'validations' in attribute: + if 'usernameProhibitedCharacters' in attribute['validations']: + attribute['validations']['username-prohibited-characters'] = ( + attribute['validations'].pop('usernameProhibitedCharacters') + ) + if 'upUsernameNotIdnHomograph' in attribute['validations']: + attribute['validations']['up-username-not-idn-homograph'] = ( + attribute['validations'].pop('upUsernameNotIdnHomograph') + ) + if 'personNameProhibitedCharacters' in attribute['validations']: + attribute['validations']['person-name-prohibited-characters'] = ( + attribute['validations'].pop('personNameProhibitedCharacters') + ) + # special JSON parsing for kc_user_profile_config + value = 
json.dumps(kc_user_profile_config[0]) + changeset[camel(component_param)][config_param].append(value) + # usual camelCase parameters + else: + changeset[camel(component_param)][camel(config_param)] = [] + raw_value = module.params.get(component_param)[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = raw_value # Directly use the raw value + changeset[camel(component_param)][camel(config_param)].append(value) + # usual parameters + else: + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make it easier to refer to current module parameters + state = module.params.get('state') + enabled = module.params.get('enabled') + parent_id = module.params.get('parent_id') + provider_type = module.params.get('provider_type') + provider_id = module.params.get('provider_id') + + # Make a deep copy of the changeset. This is use when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # Get a list of all Keycloak components that are of userprofile provider type. + realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id) + + # If this component is present get its userprofile ID. Confusingly the userprofile ID is + # also known as the Provider ID. + userprofile_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the userprofile was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the userprofile is + # already present. 
+ for userprofile in realm_userprofiles: + if provider_id == "declarative-user-profile": + userprofile_id = userprofile['id'] + changeset['id'] = userprofile_id + changeset_copy['id'] = userprofile_id + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_userprofile[param] = userprofile[param] + + if changeset_copy[param] != userprofile[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, userprofile[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" userprofile + for p, v in changeset_copy['config'].items(): + before_realm_userprofile['config'][p] = userprofile['config'][p] + if changeset_copy['config'][p] != userprofile['config'][p]: + changes += "config.%s: %s -> %s, " % (p, userprofile['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the userprofile). 
+ if userprofile_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Userprofile %s would be changed: %s" % (provider_id, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Userprofile %s changed: %s" % (provider_id, changes.strip(", ")) + else: + result['msg'] = "Userprofile %s was in sync" % (provider_id) + + result['end_state'] = changeset_copy + elif userprofile_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be deleted" % (provider_id) + else: + kc.delete_component(userprofile_id, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s deleted" % (provider_id) + + result['end_state'] = {} + elif not userprofile_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be created" % (provider_id) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s created" % (provider_id) + + result['end_state'] = changeset_copy + elif not userprofile_id and state == 'absent': + result['changed'] = False + result['msg'] = "Userprofile %s not present" % (provider_id) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/keycloak_group/tasks/main.yml b/tests/integration/targets/keycloak_group/tasks/main.yml index 8b115e3a28..f807b0640d 100644 --- a/tests/integration/targets/keycloak_group/tasks/main.yml +++ b/tests/integration/targets/keycloak_group/tasks/main.yml @@ -10,8 +10,8 @@ command: start-dev env: KC_HTTP_RELATIVE_PATH: /auth - KEYCLOAK_ADMIN: 
admin - KEYCLOAK_ADMIN_PASSWORD: password + KEYCLOAK_ADMIN: "{{ admin_user }}" + KEYCLOAK_ADMIN_PASSWORD: "{{ admin_password }}" ports: - "8080:8080" detach: true diff --git a/tests/integration/targets/keycloak_userprofile/aliases b/tests/integration/targets/keycloak_userprofile/aliases new file mode 100644 index 0000000000..bd1f024441 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +unsupported diff --git a/tests/integration/targets/keycloak_userprofile/meta/main.yml b/tests/integration/targets/keycloak_userprofile/meta/main.yml new file mode 100644 index 0000000000..c583a8fc22 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# dependencies: +# - setup_docker diff --git a/tests/integration/targets/keycloak_userprofile/readme.adoc b/tests/integration/targets/keycloak_userprofile/readme.adoc new file mode 100644 index 0000000000..943dfaf542 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/readme.adoc @@ -0,0 +1,27 @@ +// Copyright (c) Ansible Project +// GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +// SPDX-License-Identifier: GPL-3.0-or-later + +To be able to run these integration tests a keycloak server must be +reachable under a specific url with a specific admin user and password. +The exact values expected for these parameters can be found in +'vars/main.yml' file. 
A simple way to do this is to use the official +keycloak docker images like this: + +---- +docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH= -e KEYCLOAK_ADMIN= -e KEYCLOAK_ADMIN_PASSWORD= quay.io/keycloak/keycloak:24.0.5 start-dev +---- + +Example with concrete values inserted: + +---- +docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=/auth -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:24.0.5 start-dev +---- + +This test suite can run against a fresh unconfigured server instance +(no preconfiguration required) and cleans up after itself (undoes all +its config changes) as long as it runs through completely. While its active +it changes the server configuration in the following ways: + + * creating, modifying and deleting some keycloak userprofiles + diff --git a/tests/integration/targets/keycloak_userprofile/tasks/main.yml b/tests/integration/targets/keycloak_userprofile/tasks/main.yml new file mode 100644 index 0000000000..37b65d35ed --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/tasks/main.yml @@ -0,0 +1,301 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Start container + community.docker.docker_container: + name: mykeycloak + image: "quay.io/keycloak/keycloak:24.0.5" + command: start-dev + env: + KC_HTTP_RELATIVE_PATH: /auth + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: password + ports: + - "8080:8080" + detach: true + auto_remove: true + memory: 2200M + +- name: Check default ports + ansible.builtin.wait_for: + host: "localhost" + port: "8080" + state: started # Port should be open + delay: 30 # Wait before first check + timeout: 50 # Stop checking after timeout (sec) + +- name: Remove Keycloak test realm to avoid failures from previous failed runs + community.general.keycloak_realm: + 
auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: absent + +- name: Create Keycloak test realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: present + +- name: Create default User Profile (check mode) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + parent_id: "{{ realm }}" + config: "{{ config_default }}" + check_mode: true + register: result + +- name: Assert that User Profile would be created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile would be created" + +- name: Create default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_default }}" + diff: true + register: result + +- name: Assert that User Profile was created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile created" + +- name: Create default User Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: 
"{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_default }}" + register: result + +- name: Assert that User Profile was in sync + assert: + that: + - result is not changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile was in sync" + +- name: Update default User Profile (check mode) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + check_mode: true + register: result + +- name: Assert that User Profile would be changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile would be changed:") + +- name: Update default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + diff: true + register: result + +- name: Assert that User Profile changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Update default User Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + 
auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + register: result + +- name: Assert that User Profile was in sync + assert: + that: + - result is not changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile was in sync" + +## No force implemented +# - name: Force update default User Profile +# community.general.keycloak_userprofile: +# auth_keycloak_url: "{{ url }}" +# auth_realm: "{{ admin_realm }}" +# auth_username: "{{ admin_user }}" +# auth_password: "{{ admin_password }}" +# force: true +# state: present +# parent_id: "{{ realm }}" +# config: "{{ config_updated }}" +# register: result +# +# - name: Assert that forced update ran correctly +# assert: +# that: +# - result is changed +# - result.end_state != {} +# - result.end_state.providerId == "declarative-user-profile" +# - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" +# - result.msg == "Userprofile declarative-user-profile was forcibly updated" + +- name: Remove default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: absent + parent_id: "{{ realm }}" + config: "{{ config_default }}" + diff: true + register: result + +- name: Assert that User Profile was deleted + assert: + that: + - result is changed + - result.end_state == {} + - result.msg == "Userprofile declarative-user-profile deleted" + +- name: Remove default User Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + 
auth_password: "{{ admin_password }}" + state: absent + parent_id: "{{ realm }}" + config: "{{ config_default }}" + register: result + +- name: Assert that User Profile not present + assert: + that: + - result is not changed + - result.end_state == {} + - result.msg == "Userprofile declarative-user-profile not present" + +- name: Create User Profile with unmanaged attributes ENABLED + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_enabled }}" + diff: true + register: result + +- name: Assert that User Profile was created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile created" + +- name: Attempt to change the User Profile to unmanaged ADMIN_EDIT + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_admin_edit }}" + diff: true + register: result + +- name: Assert that User Profile was changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Attempt to change the User Profile to unmanaged ADMIN_VIEW + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ 
admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_admin_view }}" + diff: true + register: result + +- name: Assert that User Profile was changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Remove Keycloak test realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: absent diff --git a/tests/integration/targets/keycloak_userprofile/vars/main.yml b/tests/integration/targets/keycloak_userprofile/vars/main.yml new file mode 100644 index 0000000000..1f8ae6c823 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/vars/main.yml @@ -0,0 +1,111 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: realm_userprofile_test +attributes_default: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + usernameProhibitedCharacters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + personNameProhibitedCharacters: {} + 
annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false +attributes_additional: + - name: additionalAttribute + displayName: additionalAttribute + group: user-metadata + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false +groups_default: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata +config_default: + kc_user_profile_config: + - attributes: "{{ attributes_default }}" + groups: "{{ groups_default }}" +config_updated: + kc_user_profile_config: + - attributes: "{{ attributes_default + attributes_additional }}" + groups: "{{ groups_default }}" +config_unmanaged_attributes_enabled: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + attributes: "{{ attributes_default }}" +config_unmanaged_attributes_admin_edit: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + attributes: "{{ attributes_default }}" +config_unmanaged_attributes_admin_view: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW + attributes: "{{ attributes_default }}" diff --git a/tests/unit/plugins/modules/test_keycloak_userprofile.py b/tests/unit/plugins/modules/test_keycloak_userprofile.py new file mode 100644 index 0000000000..3001201efa --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_userprofile.py @@ -0,0 +1,866 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ 
= type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules import keycloak_userprofile + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None): + """Mock context manager for patching the methods in KeycloakAPI + """ + + obj = keycloak_userprofile.KeycloakAPI + with patch.object(obj, 'get_components', side_effect=get_components) as mock_get_components: + with patch.object(obj, 'get_component', side_effect=get_component) as mock_get_component: + with patch.object(obj, 'create_component', side_effect=create_component) as mock_create_component: + with patch.object(obj, 'update_component', side_effect=update_component) as mock_update_component: + with patch.object(obj, 'delete_component', side_effect=delete_component) as mock_delete_component: + yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response(object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def 
_mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), + } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakUserprofile(ModuleTestCase): + def setUp(self): + super(TestKeycloakUserprofile, self).setUp() + self.module = keycloak_userprofile + + def test_create_when_absent(self): + """Add a new userprofile""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "state": "present", + "provider_id": "declarative-user-profile", + "config": { + "kc_user_profile_config": [ + { + "attributes": [ + { + "annotations": {}, + "displayName": "${username}", + "multivalued": False, + "name": "username", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": None, + "validations": { + "length": { + "max": 255, + "min": 3 + }, + "up_username_not_idn_homograph": {}, + "username_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${email}", + "multivalued": False, + "name": "email", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + 
}, + "validations": { + "email": {}, + "length": { + "max": 255 + } + } + }, + { + "annotations": {}, + "displayName": "${firstName}", + "multivalued": False, + "name": "firstName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${lastName}", + "multivalued": False, + "name": "lastName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + } + ], + "groups": [ + { + "displayDescription": "Attributes, which refer to user metadata", + "displayHeader": "User metadata", + "name": "user-metadata" + } + ], + } + ] + } + } + return_value_component_create = [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-name", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + { + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + 
"validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + + + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ] + } + } + ] + return_value_get_components_get = [ + [], [] + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, create_component=return_value_component_create) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 1) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing userprofile""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + 
"state": "present", + "provider_id": "declarative-user-profile", + "config": { + "kc_user_profile_config": [ + { + "attributes": [ + { + "annotations": {}, + "displayName": "${username}", + "multivalued": False, + "name": "username", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": None, + "validations": { + "length": { + "max": 255, + "min": 3 + }, + "up_username_not_idn_homograph": {}, + "username_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${email}", + "multivalued": False, + "name": "email", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "email": {}, + "length": { + "max": 255 + } + } + }, + { + "annotations": {}, + "displayName": "${firstName}", + "multivalued": False, + "name": "firstName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${lastName}", + "multivalued": False, + "name": "lastName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + } + ], + "groups": [ + { + "displayDescription": "Attributes, which refer to user metadata", + "displayHeader": "User metadata", + "name": "user-metadata" + } + ], + } + ] + } + } + return_value_get_components_get = [ + [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-1", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + { + "attributes": [ + { + 
"name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ] + } + } + ], + [] + ] + return_value_component_update = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, + update_component=return_value_component_update) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with 
self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 1) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent userprofile""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'provider_id': 'declarative-user-profile', + 'state': 'absent', + } + return_value_get_components_get = [ + [] + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing userprofile""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'provider_id': 
'declarative-user-profile', + 'state': 'absent', + } + return_value_get_components_get = [ + [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-1", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + { + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ] + } + } + ], + [] + ] + return_value_component_delete = [ + None + ] + changed = True + + 
set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, delete_component=return_value_component_delete) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 2ae41fa83f44aefce38cd845e377dcdd73b9f32b Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 9 Sep 2024 14:05:48 +0200 Subject: [PATCH 227/482] keycloak_user_federation: get the before mappers from `before_comp` to fix `UnboundLocalError` (#8831) * fix: get the before mappers from `before_comp` * add changelog fragment * Adjust changelog fragment. 
--------- Co-authored-by: Felix Fontein --- .../fragments/8831-fix-error-when-mapper-id-is-provided.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml diff --git a/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml b/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml new file mode 100644 index 0000000000..63ac352057 --- /dev/null +++ b/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index f80d694e07..6034aa8b84 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -907,7 +907,7 @@ def main(): if cid is None: old_mapper = {} elif change.get('id') is not None: - old_mapper = next((before_mapper for before_mapper in before_mapper.get('mappers', []) if before_mapper["id"] == change['id']), None) + old_mapper = next((before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper["id"] == change['id']), None) if old_mapper is None: old_mapper = {} else: From 40f1ab31f5b8cee640c42318fbda7ff5cc0b8e86 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 9 Sep 2024 14:55:33 +0200 Subject: [PATCH 228/482] Next feature release will be 9.5.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index e625445649..5112bdc64f 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.4.0 +version: 9.5.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 41d87f5c9dbaeeffab13df8a74a8c1cdcc5fec6a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Sep 2024 08:40:34 +1200 Subject: [PATCH 229/482] gio_mime: adjust module for old vardict deprecation (#8855) * gio_mime: adjust module for old vardict deprecation * add changelog frag --- changelogs/fragments/8855-gio_mime_vardict.yml | 2 ++ plugins/modules/gio_mime.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8855-gio_mime_vardict.yml diff --git a/changelogs/fragments/8855-gio_mime_vardict.yml b/changelogs/fragments/8855-gio_mime_vardict.yml new file mode 100644 index 0000000000..54efa08579 --- /dev/null +++ b/changelogs/fragments/8855-gio_mime_vardict.yml @@ -0,0 +1,2 @@ +minor_changes: + - gio_mime - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855). 
diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 82c583c76f..bb1ef6ebe3 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -84,7 +84,7 @@ class GioMime(ModuleHelper): ), supports_check_mode=True, ) - mute_vardict_deprecation = True + use_old_vardict = False def __init_module__(self): self.runner = gio_mime_runner(self.module, check_rc=True) @@ -92,7 +92,7 @@ class GioMime(ModuleHelper): def __run__(self): check_mode_return = (0, 'Module executed in check mode', '') - if self.vars.has_changed("handler"): + if self.vars.has_changed: with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx: rc, out, err = ctx.run() self.vars.stdout = out From 37dd6ec8a3a77dcc8d147e6eef4546f4093e9530 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Sep 2024 08:40:48 +1200 Subject: [PATCH 230/482] jira: adjust module for old vardict deprecation (#8856) * jira: adjust module for old vardict deprecation * add changelog frag --- changelogs/fragments/8856-jira_vardict.yml | 2 ++ plugins/modules/jira.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8856-jira_vardict.yml diff --git a/changelogs/fragments/8856-jira_vardict.yml b/changelogs/fragments/8856-jira_vardict.yml new file mode 100644 index 0000000000..c4d8357419 --- /dev/null +++ b/changelogs/fragments/8856-jira_vardict.yml @@ -0,0 +1,2 @@ +minor_changes: + - jira - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856). 
diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index 0bb95158f7..64aed7e149 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -531,7 +531,7 @@ class JIRA(StateModuleHelper): ), supports_check_mode=False ) - mute_vardict_deprecation = True + use_old_vardict = False state_param = 'operation' def __init_module__(self): From 94472dd7e5f21823da43fae1227be6df773f58dc Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Sep 2024 08:41:53 +1200 Subject: [PATCH 231/482] use dict comprehension in plugins, part 4 (#8858) * use dict comprehension in plugins, part 4 * add changelog frag --- changelogs/fragments/8858-dict-comprehension.yml | 11 +++++++++++ plugins/modules/scaleway_container.py | 6 ++---- plugins/modules/scaleway_container_info.py | 3 +-- plugins/modules/scaleway_container_namespace.py | 6 ++---- plugins/modules/scaleway_container_namespace_info.py | 3 +-- plugins/modules/scaleway_container_registry.py | 6 ++---- plugins/modules/scaleway_container_registry_info.py | 3 +-- plugins/modules/scaleway_function.py | 6 ++---- plugins/modules/scaleway_function_info.py | 3 +-- plugins/modules/scaleway_function_namespace.py | 6 ++---- plugins/modules/scaleway_function_namespace_info.py | 3 +-- 11 files changed, 26 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/8858-dict-comprehension.yml diff --git a/changelogs/fragments/8858-dict-comprehension.yml b/changelogs/fragments/8858-dict-comprehension.yml new file mode 100644 index 0000000000..47b4acb329 --- /dev/null +++ b/changelogs/fragments/8858-dict-comprehension.yml @@ -0,0 +1,11 @@ +minor_changes: + - scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). 
+ - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). 
diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py index 8764a76349..a18cb1d75f 100644 --- a/plugins/modules/scaleway_container.py +++ b/plugins/modules/scaleway_container.py @@ -260,8 +260,7 @@ def absent_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: return changed, {} @@ -285,8 +284,7 @@ def present_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} payload_cn = payload_from_wished_cn(wished_cn) diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py index 20ebece212..350c96e545 100644 --- a/plugins/modules/scaleway_container_info.py +++ b/plugins/modules/scaleway_container_info.py @@ -97,8 +97,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py index fd56a7d433..0f5de6c31d 100644 --- a/plugins/modules/scaleway_container_namespace.py +++ b/plugins/modules/scaleway_container_namespace.py @@ -167,8 +167,7 @@ def absent_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: return changed, {} @@ -192,8 +191,7 @@ def 
present_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} payload_cn = payload_from_wished_cn(wished_cn) diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py index 758720dd57..d783747203 100644 --- a/plugins/modules/scaleway_container_namespace_info.py +++ b/plugins/modules/scaleway_container_namespace_info.py @@ -88,8 +88,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py index 6344a7ae66..4f17fecad7 100644 --- a/plugins/modules/scaleway_container_registry.py +++ b/plugins/modules/scaleway_container_registry.py @@ -150,8 +150,7 @@ def absent_strategy(api, wished_cr): changed = False cr_list = api.fetch_all_resources("namespaces") - cr_lookup = dict((cr["name"], cr) - for cr in cr_list) + cr_lookup = {cr["name"]: cr for cr in cr_list} if wished_cr["name"] not in cr_lookup: return changed, {} @@ -175,8 +174,7 @@ def present_strategy(api, wished_cr): changed = False cr_list = api.fetch_all_resources("namespaces") - cr_lookup = dict((cr["name"], cr) - for cr in cr_list) + cr_lookup = {cr["name"]: cr for cr in cr_list} payload_cr = payload_from_wished_cr(wished_cr) diff --git a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py index 9c641edcbb..7645789cff 100644 --- a/plugins/modules/scaleway_container_registry_info.py +++ 
b/plugins/modules/scaleway_container_registry_info.py @@ -87,8 +87,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py index eb121cd9c7..2de0afd987 100644 --- a/plugins/modules/scaleway_function.py +++ b/plugins/modules/scaleway_function.py @@ -245,8 +245,7 @@ def absent_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: return changed, {} @@ -270,8 +269,7 @@ def present_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} payload_fn = payload_from_wished_fn(wished_fn) diff --git a/plugins/modules/scaleway_function_info.py b/plugins/modules/scaleway_function_info.py index c30f0cdb00..d65987664c 100644 --- a/plugins/modules/scaleway_function_info.py +++ b/plugins/modules/scaleway_function_info.py @@ -96,8 +96,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_fn): fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"], diff --git a/plugins/modules/scaleway_function_namespace.py 
b/plugins/modules/scaleway_function_namespace.py index 0ea31e9bcb..7779761e38 100644 --- a/plugins/modules/scaleway_function_namespace.py +++ b/plugins/modules/scaleway_function_namespace.py @@ -168,8 +168,7 @@ def absent_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: return changed, {} @@ -193,8 +192,7 @@ def present_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} payload_fn = payload_from_wished_fn(wished_fn) diff --git a/plugins/modules/scaleway_function_namespace_info.py b/plugins/modules/scaleway_function_namespace_info.py index f3ea5ddfc8..d5d48ee4dd 100644 --- a/plugins/modules/scaleway_function_namespace_info.py +++ b/plugins/modules/scaleway_function_namespace_info.py @@ -88,8 +88,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_fn): fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"], From 76ebda7fafa29e8d19da56da20c1d656bae48c34 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Sep 2024 08:46:48 +1200 Subject: [PATCH 232/482] snap tests: re-enable test for --dangerous using smaller snap (#8861) --- tests/integration/targets/snap/tasks/main.yml | 5 +- .../targets/snap/tasks/test_dangerous.yml | 61 ++++++++++--------- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/tests/integration/targets/snap/tasks/main.yml 
b/tests/integration/targets/snap/tasks/main.yml index a2d8698d0f..e96fbde38b 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -15,9 +15,8 @@ ansible.builtin.include_tasks: test.yml - name: Include test_channel ansible.builtin.include_tasks: test_channel.yml - # TODO: Find better package to download and install from sources - cider 1.6.0 takes over 35 seconds to install - # - name: Include test_dangerous - # ansible.builtin.include_tasks: test_dangerous.yml + - name: Include test_dangerous + ansible.builtin.include_tasks: test_dangerous.yml - name: Include test_3dash ansible.builtin.include_tasks: test_3dash.yml - name: Include test_empty_list diff --git a/tests/integration/targets/snap/tasks/test_dangerous.yml b/tests/integration/targets/snap/tasks/test_dangerous.yml index 8fe4edee0b..e85725992d 100644 --- a/tests/integration/targets/snap/tasks/test_dangerous.yml +++ b/tests/integration/targets/snap/tasks/test_dangerous.yml @@ -5,43 +5,48 @@ # NOTE This is currently disabled for performance reasons! 
-- name: Make sure package is not installed (cider) +- name: Make sure package is not installed (bpytop) community.general.snap: - name: cider + name: bpytop state: absent -- name: Download cider snap - ansible.builtin.get_url: - url: https://github.com/ciderapp/cider-releases/releases/download/v1.6.0/cider_1.6.0_amd64.snap - dest: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - mode: "0644" +- name: Download bpytop snap + ansible.builtin.command: + cmd: snap download bpytop + chdir: "{{ remote_tmp_dir }}" + register: bpytop_download -# Test for https://github.com/ansible-collections/community.general/issues/5715 -- name: Install package from file (check) - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - check_mode: true - register: install_dangerous_check +- name: Test block + vars: + snap_file: "{{ (bpytop_download.stdout_lines[-1] | split(' '))[-1] }}" + snap_path: "{{ remote_tmp_dir }}/{{ snap_file }}" + block: + # Test for https://github.com/ansible-collections/community.general/issues/5715 + - name: Install package from file (check) + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + check_mode: true + register: install_dangerous_check -- name: Install package from file - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - register: install_dangerous + - name: Install package from file + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + register: install_dangerous -- name: Install package from file - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - register: install_dangerous_idempot + - name: Install package from file (again) + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + register: install_dangerous_idempot - name: Remove package 
community.general.snap: - name: cider + name: bpytop state: absent register: remove_dangerous From e4472b322bdd1e23f3f876110692a2128e0a9db8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Sep 2024 19:27:12 +1200 Subject: [PATCH 233/482] pipx/pipx_info: refactor doc fragment (#8859) * pipx/pipx_info: refactor doc fragment * pipx/pipx_info: refactor common options to module_utils --- .github/BOTMETA.yml | 2 ++ plugins/doc_fragments/pipx.py | 37 +++++++++++++++++++++++++++++ plugins/module_utils/pipx.py | 6 +++++ plugins/modules/pipx.py | 44 ++++++++++------------------------- plugins/modules/pipx_info.py | 28 +++------------------- 5 files changed, 60 insertions(+), 57 deletions(-) create mode 100644 plugins/doc_fragments/pipx.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index bc34755b31..c9326fa75a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -131,6 +131,8 @@ files: maintainers: $team_huawei $doc_fragments/nomad.py: maintainers: chris93111 apecnascimento + $doc_fragments/pipx.py: + maintainers: russoz $doc_fragments/xenserver.py: labels: xenserver maintainers: bvitnik diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py new file mode 100644 index 0000000000..112695f24f --- /dev/null +++ b/plugins/doc_fragments/pipx.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + DOCUMENTATION = r''' +options: + global: + description: + - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. + - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. 
+ Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version or newer. + type: bool + default: false + executable: + description: + - Path to the C(pipx) installed in the system. + - > + If not specified, the module will use C(python -m pipx) to run the tool, + using the same Python interpreter as ansible itself. + type: path +notes: + - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). + - Please note that C(pipx) requires Python 3.6 or above. + - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). + - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. + - > + This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) + passed using the R(environment Ansible keyword, playbooks_environment). + - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). +''' diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 9ae7b5381c..513b9081f6 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -9,6 +9,12 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +pipx_common_argspec = { + "global": dict(type='bool', default=False), + "executable": dict(type='path'), +} + + _state_map = dict( install='install', install_all='install-all', diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 4793dd49ea..4b94dee2ac 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -18,6 +18,7 @@ description: - Manage Python applications installed in isolated virtualenvs using pipx. 
extends_documentation_fragment: - community.general.attributes + - community.general.pipx attributes: check_mode: support: full @@ -54,17 +55,18 @@ options: name: type: str description: - - > - The name of the application to be installed. It must to be a simple package name. - For passing package specifications or installing from URLs or directories, - please use the O(source) option. + - The name of the application. In C(pipx) documentation it is also referred to as + the name of the virtual environment where the application will be installed. + - If O(name) is a simple package name without version specifiers, + then that name is used as the Python package name to be installed. + - Use O(source) for passing package specifications or installing from URLs or directories. source: type: str description: - - > - If the application source, such as a package with version specifier, or an URL, - directory or any other accepted specification. See C(pipx) documentation for more details. - - When specified, the C(pipx) command will use O(source) instead of O(name). + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. install_apps: description: - Add apps from the injected packages. @@ -114,13 +116,6 @@ options: type: bool default: false version_added: 6.6.0 - executable: - description: - - Path to the C(pipx) installed in the system. - - > - If not specified, the module will use C(python -m pipx) to run the tool, - using the same Python interpreter as ansible itself. - type: path editable: description: - Install the project in editable mode. 
@@ -139,12 +134,6 @@ options: type: str version_added: 9.3.0 global: - description: - - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. - - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. - Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version or newer. - type: bool - default: false version_added: 9.4.0 spec_metadata: description: @@ -154,19 +143,11 @@ options: type: path version_added: 9.4.0 notes: - - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). - - Please note that C(pipx) requires Python 3.6 or above. - - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - > - This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR) - passed using the R(environment Ansible keyword, playbooks_environment). - > This first implementation does not verify whether a specified version constraint has been installed or not. Hence, when using version operators, C(pipx) module will always try to execute the operation, even when the application was previously installed. This feature will be added in the future. - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). 
author: - "Alexei Znamensky (@russoz)" ''' @@ -213,7 +194,7 @@ EXAMPLES = ''' import json from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec from ansible.module_utils.facts.compat import ansible_facts @@ -240,13 +221,12 @@ class PipX(StateModuleHelper): index_url=dict(type='str'), python=dict(type='str'), system_site_packages=dict(type='bool', default=False), - executable=dict(type='path'), editable=dict(type='bool', default=False), pip_args=dict(type='str'), suffix=dict(type='str'), spec_metadata=dict(type='path'), ) - argument_spec["global"] = dict(type='bool', default=False) + argument_spec.update(pipx_common_argspec) module = dict( argument_spec=argument_spec, diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 816729f9a6..0e0cc0fe14 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -19,6 +19,7 @@ description: extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.pipx options: name: description: @@ -40,30 +41,8 @@ options: - The raw output is not affected by O(include_deps) or O(include_injected). type: bool default: false - executable: - description: - - Path to the C(pipx) installed in the system. - - > - If not specified, the module will use C(python -m pipx) to run the tool, - using the same Python interpreter as ansible itself. - type: path global: - description: - - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. - - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. 
- Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version or newer. - type: bool - default: false version_added: 9.3.0 -notes: - - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require C(pipx>=1.7.0). - - Please note that C(pipx) requires Python 3.6 or above. - - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - > - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) - passed using the R(environment Ansible keyword, playbooks_environment). - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). author: - "Alexei Znamensky (@russoz)" ''' @@ -141,7 +120,7 @@ cmd: import json from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec from ansible.module_utils.facts.compat import ansible_facts @@ -153,9 +132,8 @@ class PipXInfo(ModuleHelper): include_deps=dict(type='bool', default=False), include_injected=dict(type='bool', default=False), include_raw=dict(type='bool', default=False), - executable=dict(type='path'), ) - argument_spec["global"] = dict(type='bool', default=False) + argument_spec.update(pipx_common_argspec) module = dict( argument_spec=argument_spec, supports_check_mode=True, From 2f1df973a69bba541f5af422c8344fa20ad34606 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 16 Sep 2024 21:35:06 +0200 Subject: [PATCH 234/482] Remove private key and certificates from documentation (#8870) * Remove private key and 
certificate from example. * Censor certificates in examples. --- plugins/modules/java_keystore.py | 6 +- plugins/modules/manageiq_provider.py | 136 ++------------------------- 2 files changed, 10 insertions(+), 132 deletions(-) diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index 2aeab75c06..7da52cc057 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -150,13 +150,11 @@ EXAMPLES = ''' name: example certificate: | -----BEGIN CERTIFICATE----- - h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69 - MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB + h19dUZ2co2f... -----END CERTIFICATE----- private_key: | -----BEGIN RSA PRIVATE KEY----- - DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3 - GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99 + DBVFTEVDVFJ... -----END RSA PRIVATE KEY----- password: changeit dest: /etc/security/keystore.jks diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index af5c147f46..35c73a38b3 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -304,22 +304,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - 
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -330,22 +315,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1:80' @@ -367,22 +337,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -392,22 +347,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -455,22 +395,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: hostname: 'metrics.example.com' @@ -480,22 +405,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -551,22 +461,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- ssh_keypair: hostname: director.example.com @@ -590,22 +485,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: role: amqp From 4123934b461ee6bd0cf294bbb337ad328747f72a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:57:47 +1200 Subject: [PATCH 235/482] reformat xfconf docs (#8875) * reformat module docs * fix sanity --- plugins/modules/xfconf.py | 172 ++++++++++++++++----------------- plugins/modules/xfconf_info.py | 131 +++++++++++++------------ 2 files changed, 150 insertions(+), 153 deletions(-) diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 15943ae59d..2e1e67ff32 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -8,26 +8,27 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: xfconf author: - - "Joseph Benden (@jbenden)" - - "Alexei Znamensky (@russoz)" +- "Joseph Benden (@jbenden)" +- "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: - - This module allows for the manipulation of Xfce 4 Configuration with the help of - xfconf-query. Please see the xfconf-query(1) man page for more details. +- This module allows for the manipulation of Xfce 4 Configuration with the help of xfconf-query. Please see the xfconf-query(1) man page for more + details. seealso: - - name: xfconf-query(1) man page - description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. - link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' +- name: xfconf-query(1) man page + description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. + link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' - - name: xfconf - Configuration Storage System - description: XFCE documentation for the Xfconf configuration system. - link: 'https://docs.xfce.org/xfce/xfconf/start' +- name: xfconf - Configuration Storage System + description: XFCE documentation for the Xfconf configuration system. 
+ link: 'https://docs.xfce.org/xfce/xfconf/start' extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: @@ -38,55 +39,50 @@ attributes: options: channel: description: - - A Xfconf preference channel is a top-level tree key, inside of the - Xfconf repository that corresponds to the location for which all - application properties/keys are stored. See man xfconf-query(1). + - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application + properties/keys are stored. See man xfconf-query(1). required: true type: str property: description: - - A Xfce preference key is an element in the Xfconf repository - that corresponds to an application preference. See man xfconf-query(1). + - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man xfconf-query(1). required: true type: str value: description: - - Preference properties typically have simple values such as strings, - integers, or lists of strings and integers. See man xfconf-query(1). + - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See man xfconf-query(1). type: list elements: raw value_type: description: - - The type of value being set. - - When providing more than one O(value_type), the length of the list must - be equal to the length of O(value). - - If only one O(value_type) is provided, but O(value) contains more than - on element, that O(value_type) will be applied to all elements of O(value). - - If the O(property) being set is an array and it can possibly have only one - element in the array, then O(force_array=true) must be used to ensure - that C(xfconf-query) will interpret the value as an array rather than a - scalar. - - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. 
+ - The type of value being set. + - When providing more than one O(value_type), the length of the list must be equal to the length of O(value). + - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) will be applied to all elements of + O(value). + - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) must be used to + ensure that C(xfconf-query) will interpret the value as an array rather than a scalar. + - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. type: list elements: str - choices: [ string, int, double, bool, uint, uchar, char, uint64, int64, float ] + choices: [string, int, double, bool, uint, uchar, char, uint64, int64, float] state: type: str description: - - The action to take upon the property/value. - - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. - choices: [ present, absent ] + - The action to take upon the property/value. + - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. + choices: [present, absent] default: "present" force_array: description: - - Force array even if only one element. + - Force array even if only one element. type: bool default: false aliases: ['array'] version_added: 1.0.0 -''' +""" EXAMPLES = """ +--- - name: Change the DPI to "192" xfconf: channel: "xsettings" @@ -110,60 +106,58 @@ EXAMPLES = """ force_array: true """ -RETURN = ''' - channel: - description: The channel specified in the module parameters - returned: success - type: str - sample: "xsettings" - property: - description: The property specified in the module parameters - returned: success - type: str - sample: "/Xft/DPI" - value_type: - description: - - The type of the value that was changed (V(none) for O(state=reset)). 
- Either a single string value or a list of strings for array types. - - This is a string or a list of strings. - returned: success - type: any - sample: '"int" or ["str", "str", "str"]' - value: - description: - - The value of the preference key after executing the module. Either a - single string value or a list of strings for array types. - - This is a string or a list of strings. - returned: success - type: any - sample: '"192" or ["orange", "yellow", "violet"]' - previous_value: - description: - - The value of the preference key before executing the module. - Either a single string value or a list of strings for array types. - - This is a string or a list of strings. - returned: success - type: any - sample: '"96" or ["red", "blue", "green"]' - cmd: - description: - - A list with the resulting C(xfconf-query) command executed by the module. - returned: success - type: list - elements: str - version_added: 5.4.0 - sample: - - /usr/bin/xfconf-query - - --channel - - xfce4-panel - - --property - - /plugins/plugin-19/timezone - - --create - - --type - - string - - --set - - Pacific/Auckland -''' +RETURN = """ +--- +channel: + description: The channel specified in the module parameters + returned: success + type: str + sample: "xsettings" +property: + description: The property specified in the module parameters + returned: success + type: str + sample: "/Xft/DPI" +value_type: + description: + - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"int" or ["str", "str", "str"]' +value: + description: + - The value of the preference key after executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. 
+ returned: success + type: any + sample: '"192" or ["orange", "yellow", "violet"]' +previous_value: + description: + - The value of the preference key before executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"96" or ["red", "blue", "green"]' +cmd: + description: + - A list with the resulting C(xfconf-query) command executed by the module. + returned: success + type: list + elements: str + version_added: 5.4.0 + sample: + - /usr/bin/xfconf-query + - --channel + - xfce4-panel + - --property + - /plugins/plugin-19/timezone + - --create + - --type + - string + - --set + - Pacific/Auckland +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index 3d56a70cb9..aba0d912ff 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -7,17 +7,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: xfconf_info author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Retrieve XFCE4 configurations version_added: 3.5.0 description: - - This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). +- This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module +- community.general.attributes +- community.general.attributes.info_module attributes: check_mode: version_added: 3.3.0 @@ -40,10 +41,11 @@ options: - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel). 
type: str notes: - - See man xfconf-query(1) for more details. -''' +- See man xfconf-query(1) for more details. +""" EXAMPLES = """ +--- - name: Get list of all available channels community.general.xfconf_info: {} register: result @@ -66,63 +68,64 @@ EXAMPLES = """ register: result """ -RETURN = ''' - channels: - description: - - List of available channels. - - Returned when the module receives no parameter at all. - returned: success - type: list - elements: str - sample: - - xfce4-desktop - - displays - - xsettings - - xfwm4 - properties: - description: - - List of available properties for a specific channel. - - Returned by passing only the O(channel) parameter to the module. - returned: success - type: list - elements: str - sample: - - /Gdk/WindowScalingFactor - - /Gtk/ButtonImages - - /Gtk/CursorThemeSize - - /Gtk/DecorationLayout - - /Gtk/FontName - - /Gtk/MenuImages - - /Gtk/MonospaceFontName - - /Net/DoubleClickTime - - /Net/IconThemeName - - /Net/ThemeName - - /Xft/Antialias - - /Xft/Hinting - - /Xft/HintStyle - - /Xft/RGBA - is_array: - description: - - Flag indicating whether the property is an array or not. - returned: success - type: bool - value: - description: - - The value of the property. Empty if the property is of array type. - returned: success - type: str - sample: Monospace 10 - value_array: - description: - - The array value of the property. Empty if the property is not of array type. - returned: success - type: list - elements: str - sample: - - Main - - Work - - Tmp -''' +RETURN = """ +--- +channels: + description: + - List of available channels. + - Returned when the module receives no parameter at all. + returned: success + type: list + elements: str + sample: + - xfce4-desktop + - displays + - xsettings + - xfwm4 +properties: + description: + - List of available properties for a specific channel. + - Returned by passing only the O(channel) parameter to the module. 
+ returned: success + type: list + elements: str + sample: + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA +is_array: + description: + - Flag indicating whether the property is an array or not. + returned: success + type: bool +value: + description: + - The value of the property. Empty if the property is of array type. + returned: success + type: str + sample: Monospace 10 +value_array: + description: + - The array value of the property. Empty if the property is not of array type. + returned: success + type: list + elements: str + sample: + - Main + - Work + - Tmp +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner From f93883aa204ac57e4e39ad09d644d02f17b35096 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Mik=C3=A1cz=C3=B3?= Date: Tue, 17 Sep 2024 14:53:55 +0200 Subject: [PATCH 236/482] gitlab_runner: update requirements in docs (#8860) * Update gitlab_runner.py Be specific related requirements for package version. This difference change the whole dependency chain for playbook. 
* Update plugins/modules/gitlab_runner.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/gitlab_runner.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index b11e029103..a285c4030a 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -33,7 +33,10 @@ author: - Samy Coenen (@SamyCoenen) - Guillaume Martinez (@Lunik) requirements: - - python-gitlab >= 1.5.0 + - python-gitlab >= 1.5.0 for legacy runner registration workflow + (runner registration token - U(https://docs.gitlab.com/runner/register/#register-with-a-runner-registration-token-deprecated)) + - python-gitlab >= 4.0.0 for new runner registration workflow + (runner authentication token - U(https://docs.gitlab.com/runner/register/#register-with-a-runner-authentication-token)) extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab From 80f48cceb4587edb6199917b816588bd0e00c06c Mon Sep 17 00:00:00 2001 From: Mike Raineri Date: Wed, 18 Sep 2024 11:33:30 -0400 Subject: [PATCH 237/482] Redfish: Added steps to allow a user to change their password when their account requires a password change (#8653) * Redfish: Added steps to allow a user to change their password when their account requires a password change Signed-off-by: Mike Raineri * Bug fix Signed-off-by: Mike Raineri * Bug fix Signed-off-by: Mike Raineri * Bug fixes with return data handling Signed-off-by: Mike Raineri * Added changelog fragment Signed-off-by: Mike Raineri * Update changelogs/fragments/8652-Redfish-Password-Change-Required.yml Co-authored-by: Felix Fontein --------- Signed-off-by: Mike Raineri Co-authored-by: Felix Fontein --- .../8652-Redfish-Password-Change-Required.yml | 2 + plugins/module_utils/redfish_utils.py | 95 ++++++++++++++----- plugins/modules/redfish_command.py | 15 ++- 3 files changed, 83 insertions(+), 29 deletions(-) create mode 
100644 changelogs/fragments/8652-Redfish-Password-Change-Required.yml diff --git a/changelogs/fragments/8652-Redfish-Password-Change-Required.yml b/changelogs/fragments/8652-Redfish-Password-Change-Required.yml new file mode 100644 index 0000000000..44cfd41430 --- /dev/null +++ b/changelogs/fragments/8652-Redfish-Password-Change-Required.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index b7fdeb3a52..c1efd00b70 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -165,11 +165,11 @@ class RedfishUtils(object): if not allow_no_resp: raise except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" % (uri, e.reason)} @@ -208,11 +208,11 @@ class RedfishUtils(object): data = None headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" % (uri, e.reason)} @@ -256,11 +256,11 @@ class RedfishUtils(object): follow_redirects='all', 
use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'changed': False, 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'changed': False, 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)} @@ -291,11 +291,11 @@ class RedfishUtils(object): follow_redirects='all', use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on PUT request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'" % (uri, e.reason)} @@ -317,11 +317,11 @@ class RedfishUtils(object): follow_redirects='all', use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} @@ -391,8 +391,10 @@ class RedfishUtils(object): :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error + :return: the JSON data of the response if present """ msg = http_client.responses.get(error.code, '') + data = None if error.code >= 400: try: body = error.read().decode('utf-8') @@ -406,7 +408,7 @@ class RedfishUtils(object): msg = str(data['error']['@Message.ExtendedInfo']) except 
Exception: pass - return msg + return msg, data def _init_session(self): pass @@ -1245,32 +1247,49 @@ class RedfishUtils(object): return response return {'ret': True, 'changed': True} - def _find_account_uri(self, username=None, acct_id=None): + def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): if not any((username, acct_id)): return {'ret': False, 'msg': 'Must provide either account_id or account_username'} - response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: - return response - data = response['data'] - - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - for uri in uris: - response = self.get_request(self.root_uri + uri) + if password_change_uri: + # Password change required; go directly to the specified URI + response = self.get_request(self.root_uri + password_change_uri) if response['ret'] is False: - continue + return response data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} + else: + # Walk the accounts collection to find the desired user + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': 
True, 'data': data, + 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} @@ -1491,7 +1510,8 @@ class RedfishUtils(object): 'Must provide account_password for UpdateUserPassword command'} response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) + acct_id=user.get('account_id'), + password_change_uri=user.get('account_passwordchangerequired')) if not response['ret']: return response @@ -1534,6 +1554,31 @@ class RedfishUtils(object): resp['msg'] = 'Modified account service' return resp + def check_password_change_required(self, return_data): + """ + Checks a response if a user needs to change their password + + :param return_data: The return data for a failed request + :return: None or the URI of the account to update + """ + uri = None + if 'data' in return_data: + # Find the extended messages in the response payload + extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + if len(extended_messages) == 0: + extended_messages = return_data['data'].get('@Message.ExtendedInfo', []) + # Go through each message and look for Base.1.X.PasswordChangeRequired + for message in extended_messages: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + # Password change required; get the URI of the user account + uri = message['MessageArgs'][0] + break + return uri + def get_sessions(self): result = {} # listing all users has always been slower than other operations, why? 
diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index f9b0c8bd3b..df541a1bd3 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -911,6 +911,7 @@ def main(): 'account_oemaccounttypes': module.params['oem_account_types'], 'account_updatename': module.params['update_username'], 'account_properties': module.params['account_properties'], + 'account_passwordchangerequired': None, } # timeout @@ -983,10 +984,16 @@ def main(): # execute only if we find an Account service resource result = rf_utils._find_accountservice_resource() if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - result = ACCOUNTS_COMMANDS[command](user) + # If a password change is required and the user is attempting to + # modify their password, try to proceed. + user['account_passwordchangerequired'] = rf_utils.check_password_change_required(result) + if len(command_list) == 1 and command_list[0] == "UpdateUserPassword" and user['account_passwordchangerequired']: + result = rf_utils.update_user_password(user) + else: + module.fail_json(msg=to_native(result['msg'])) + else: + for command in command_list: + result = ACCOUNTS_COMMANDS[command](user) elif category == "Systems": # execute only if we find a System resource From 6af74d1ba6a8f5378d4fa775bc474f525599e18f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 19 Sep 2024 03:34:19 +1200 Subject: [PATCH 238/482] multiple modules: improve dict.items() loops (#8876) * multiple modules: improve dict.items() loops * simplify in memset_* modules * add changelog frag --- changelogs/fragments/8876-dict-items-loop.yml | 16 ++++++++++++++++ plugins/modules/gitlab_deploy_key.py | 6 +++--- plugins/modules/gitlab_group.py | 6 +++--- plugins/modules/gitlab_issue.py | 6 +++--- plugins/modules/gitlab_merge_request.py | 6 +++--- plugins/modules/gitlab_runner.py | 12 ++++++------ 
plugins/modules/icinga2_host.py | 4 +--- plugins/modules/memset_dns_reload.py | 4 +--- plugins/modules/memset_memstore_info.py | 4 +--- plugins/modules/memset_server_info.py | 4 +--- plugins/modules/memset_zone.py | 4 +--- plugins/modules/memset_zone_domain.py | 4 +--- plugins/modules/memset_zone_record.py | 4 +--- plugins/modules/nmcli.py | 2 +- plugins/modules/scaleway_user_data.py | 2 +- plugins/modules/udm_dns_record.py | 3 +-- 16 files changed, 44 insertions(+), 43 deletions(-) create mode 100644 changelogs/fragments/8876-dict-items-loop.yml diff --git a/changelogs/fragments/8876-dict-items-loop.yml b/changelogs/fragments/8876-dict-items-loop.yml new file mode 100644 index 0000000000..6bd170c7b2 --- /dev/null +++ b/changelogs/fragments/8876-dict-items-loop.yml @@ -0,0 +1,16 @@ +minor_changes: + - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876). + - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). 
+ - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py index 7c0ff06b7b..ab89520248 100644 --- a/plugins/modules/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -196,9 +196,9 @@ class GitLabDeployKey(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(deploy_key, arg_key) != arguments[arg_key]: - setattr(deploy_key, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(deploy_key, arg_key) != arg_value: + setattr(deploy_key, arg_key, arg_value) changed = True return (changed, deploy_key) diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 1f4dadff70..04a8f6c81b 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -277,9 +277,9 @@ class GitLabGroup(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(group, arg_key) != arguments[arg_key]: - setattr(group, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(group, arg_key) != arg_value: + setattr(group, arg_key, arg_value) changed = True return (changed, group) diff --git 
a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py index 3277c4f1aa..1ad7d04822 100644 --- a/plugins/modules/gitlab_issue.py +++ b/plugins/modules/gitlab_issue.py @@ -264,14 +264,14 @@ class GitlabIssue(object): if key == 'milestone_id': old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" - if options[key] != old_milestone: + if value != old_milestone: return True elif key == 'assignee_ids': - if options[key] != sorted([user["id"] for user in getattr(issue, 'assignees')]): + if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): return True elif key == 'labels': - if options[key] != sorted(getattr(issue, key)): + if value != sorted(getattr(issue, key)): return True elif getattr(issue, key) != value: diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py index 5bb9cb9c7d..8e14f0a181 100644 --- a/plugins/modules/gitlab_merge_request.py +++ b/plugins/modules/gitlab_merge_request.py @@ -263,15 +263,15 @@ class GitlabMergeRequest(object): key = 'force_remove_source_branch' if key == 'assignee_ids': - if options[key] != sorted([user["id"] for user in getattr(mr, 'assignees')]): + if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): return True elif key == 'reviewer_ids': - if options[key] != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): return True elif key == 'labels': - if options[key] != sorted(getattr(mr, key)): + if value != sorted(getattr(mr, key)): return True elif getattr(mr, key) != value: diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index a285c4030a..68e50f05ec 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -368,18 +368,18 @@ class GitLabRunner(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if 
isinstance(arguments[arg_key], list): + if arg_value is not None: + if isinstance(arg_value, list): list1 = getattr(runner, arg_key) list1.sort() - list2 = arguments[arg_key] + list2 = arg_value list2.sort() if list1 != list2: - setattr(runner, arg_key, arguments[arg_key]) + setattr(runner, arg_key, arg_value) changed = True else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) + if getattr(runner, arg_key) != arg_value: + setattr(runner, arg_key, arg_value) changed = True return (changed, runner) diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py index ec04d8df74..5abbc43687 100644 --- a/plugins/modules/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -282,9 +282,7 @@ def main(): 'vars.made_by': "ansible" } } - - for key, value in variables.items(): - data['attrs']['vars.' + key] = value + data['attrs'].update({'vars.' + key: value for key, value in variables.items()}) changed = False if icinga.exists(name): diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py index 668c8c0bf3..8cff51ade1 100644 --- a/plugins/modules/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -178,9 +178,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = reload_dns(args) diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py index c00ef15eb4..5dfd1f956a 100644 --- a/plugins/modules/memset_memstore_info.py +++ b/plugins/modules/memset_memstore_info.py @@ -163,9 +163,7 @@ def main(): ) # populate the dict with the user-provided vars. 
- args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py index 78ea99df31..40862ae944 100644 --- a/plugins/modules/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -288,9 +288,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py index f520d54460..e405ad3e86 100644 --- a/plugins/modules/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -300,9 +300,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py index e07ac1ff02..7443e6c256 100644 --- a/plugins/modules/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -244,9 +244,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 80838a26a3..349240b84e 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -374,9 +374,7 @@ def main(): ) # populate the dict with the user-provided vars. 
- args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # perform some Memset API-specific validation diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 6f0884da92..e48183d049 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -1944,7 +1944,7 @@ class Nmcli(object): convert_func = self.list_to_string if callable(convert_func): - options[setting] = convert_func(options[setting]) + options[setting] = convert_func(value) return options diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py index 601231def9..72046ff532 100644 --- a/plugins/modules/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -149,7 +149,7 @@ def core(module): # Then we patch keys that are different for key, value in user_data.items(): - if key not in present_user_data or user_data[key] != present_user_data[key]: + if key not in present_user_data or value != present_user_data[key]: changed = True if compute_api.module.check_mode: diff --git a/plugins/modules/udm_dns_record.py b/plugins/modules/udm_dns_record.py index 99fe10c63e..857792993d 100644 --- a/plugins/modules/udm_dns_record.py +++ b/plugins/modules/udm_dns_record.py @@ -196,8 +196,7 @@ def main(): else: obj['name'] = name - for k, v in data.items(): - obj[k] = v + obj.update(data) diff = obj.diff() changed = obj.diff() != [] if not module.check_mode: From 27cb0c9090dd8178170d5c95293e2664321fcab8 Mon Sep 17 00:00:00 2001 From: Florian Weber Date: Fri, 20 Sep 2024 20:07:00 +0200 Subject: [PATCH 239/482] Update example for community.general.homebrew_services (#8886) --- plugins/modules/homebrew_services.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py index 2794025b29..96e81bea63 100644 --- a/plugins/modules/homebrew_services.py +++ 
b/plugins/modules/homebrew_services.py @@ -61,17 +61,17 @@ EXAMPLES = """ state: present - name: Start the foo service (equivalent to `brew services start foo`) - community.general.homebrew_service: + community.general.homebrew_services: name: foo state: present - name: Restart the foo service (equivalent to `brew services restart foo`) - community.general.homebrew_service: + community.general.homebrew_services: name: foo state: restarted - name: Remove the foo service (equivalent to `brew services stop foo`) - community.general.homebrew_service: + community.general.homebrew_services: name: foo service_state: absent """ From ac302eb77d82f5ed87cf8b037297c3482622247d Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Sat, 21 Sep 2024 09:30:40 +0200 Subject: [PATCH 240/482] keycloak_user_federation: set `krbPrincipalAttribute` to `''` if unset in kc responses (#8785) * set `krbPrincipalAttribute` to `''` if unset in kc before and after responses * add changelog fragment * Update changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...cipalAttribute-to-empty-string-if-missing.yaml | 2 ++ plugins/modules/keycloak_user_federation.py | 15 ++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml diff --git a/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml b/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml new file mode 100644 index 0000000000..c8a6ff752a --- /dev/null +++ b/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - minimize change detection by setting 
``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 6034aa8b84..06283a025e 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -721,15 +721,20 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode from copy import deepcopy +def normalize_kc_comp(comp): + # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; + # to make change detection and diff more accurate we set it again in the kc responses + if 'config' in comp: + if 'krbPrincipalAttribute' not in comp['config']: + comp['config']['krbPrincipalAttribute'] = [''] + + def sanitize(comp): compcopy = deepcopy(comp) if 'config' in compcopy: compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} if 'bindCredential' in compcopy['config']: compcopy['config']['bindCredential'] = '**********' - # an empty string is valid for krbPrincipalAttribute but is filtered out in diff - if 'krbPrincipalAttribute' not in compcopy['config']: - compcopy['config']['krbPrincipalAttribute'] = '' if 'mappers' in compcopy: for mapper in compcopy['mappers']: if 'config' in mapper: @@ -885,6 +890,8 @@ def main(): if cid is not None and before_comp: before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + normalize_kc_comp(before_comp) + # Build a proposed changeset from parameters given to this module changeset = {} @@ -994,6 +1001,7 @@ def main(): kc.delete_component(default_mapper['id'], realm) after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + normalize_kc_comp(after_comp) if module._diff: result['diff'] = dict(before='', after=sanitize(after_comp)) 
result['end_state'] = sanitize(after_comp) @@ -1041,6 +1049,7 @@ def main(): after_comp = kc.get_component(cid, realm) after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + normalize_kc_comp(after_comp) after_comp_sanitized = sanitize(after_comp) before_comp_sanitized = sanitize(before_comp) result['end_state'] = after_comp_sanitized From 38479ee9ff7060f497f4d99b661beca38e4e1693 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Sat, 21 Sep 2024 10:31:50 +0300 Subject: [PATCH 241/482] npm: Add force flag (#8885) * Add force flag for nmp module * Add CHANGELOG fragment * Add force to cmdrunner * Update CHANGELOG * Add comma --- .../fragments/8885-add-force-flag-for-nmp.yml | 2 ++ plugins/modules/npm.py | 19 +++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8885-add-force-flag-for-nmp.yml diff --git a/changelogs/fragments/8885-add-force-flag-for-nmp.yml b/changelogs/fragments/8885-add-force-flag-for-nmp.yml new file mode 100644 index 0000000000..40eaeff74b --- /dev/null +++ b/changelogs/fragments/8885-add-force-flag-for-nmp.yml @@ -0,0 +1,2 @@ +minor_changes: + - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py index e6dc0b772a..a906b2c127 100644 --- a/plugins/modules/npm.py +++ b/plugins/modules/npm.py @@ -96,6 +96,12 @@ options: type: bool default: false version_added: 2.5.0 + force: + description: + - Use the C(--force) flag when installing. + type: bool + default: false + version_added: 9.5.0 requirements: - npm installed in bin path (recommended /usr/local/bin) ''' @@ -117,6 +123,11 @@ EXAMPLES = r''' name: coffee-script global: true +- name: Force Install "coffee-script" node.js package. 
+ community.general.npm: + name: coffee-script + force: true + - name: Remove the globally package "coffee-script". community.general.npm: name: coffee-script @@ -167,6 +178,7 @@ class Npm(object): self.state = kwargs['state'] self.no_optional = kwargs['no_optional'] self.no_bin_links = kwargs['no_bin_links'] + self.force = kwargs['force'] if kwargs['executable']: self.executable = kwargs['executable'].split(' ') @@ -191,6 +203,7 @@ class Npm(object): registry=cmd_runner_fmt.as_opt_val('--registry'), no_optional=cmd_runner_fmt.as_bool('--no-optional'), no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'), + force=cmd_runner_fmt.as_bool('--force'), ) ) @@ -212,7 +225,7 @@ class Npm(object): params['name_version'] = self.name_version if add_package_name else None with self.runner( - "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links", + "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links force", check_rc=check_rc, cwd=cwd ) as ctx: rc, out, err = ctx.run(**params) @@ -289,6 +302,7 @@ def main(): ci=dict(default=False, type='bool'), no_optional=dict(default=False, type='bool'), no_bin_links=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), ) arg_spec['global'] = dict(default=False, type='bool') module = AnsibleModule( @@ -318,7 +332,8 @@ def main(): unsafe_perm=module.params['unsafe_perm'], state=state, no_optional=module.params['no_optional'], - no_bin_links=module.params['no_bin_links']) + no_bin_links=module.params['no_bin_links'], + force=module.params['force']) changed = False if module.params['ci']: From 0bf84ba2b6b86bcfb93bde427e6ae0e7e9ed1439 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:09:36 +1200 Subject: [PATCH 242/482] fix comprehension (#8895) * fix comprehension * add changelog frag * Update changelogs/fragments/8895-fix-comprehension.yaml Co-authored-by: 
Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/8895-fix-comprehension.yaml | 2 ++ plugins/modules/lxd_container.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8895-fix-comprehension.yaml diff --git a/changelogs/fragments/8895-fix-comprehension.yaml b/changelogs/fragments/8895-fix-comprehension.yaml new file mode 100644 index 0000000000..aecd0fd83e --- /dev/null +++ b/changelogs/fragments/8895-fix-comprehension.yaml @@ -0,0 +1,2 @@ +bugfixes: + - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888). diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index 88e502e7c8..5c5d8a4d8d 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -618,7 +618,7 @@ class LXDContainerManagement(object): data = (self._get_instance_state_json() or {}).get('metadata', None) or {} network = { k: v - for k, v in data.get('network', {}).items() + for k, v in (data.get('network') or {}).items() if k not in ignore_devices } addresses = { @@ -768,7 +768,7 @@ class LXDContainerManagement(object): self.old_instance_json = self._get_instance_json() self.old_sections = { section: adjust_content(content) - for section, content in self.old_instance_json.get('metadata', {}).items() + for section, content in (self.old_instance_json.get('metadata') or {}).items() if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) } From deaad6e5479d214da1b762c6166135bcb03d419a Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 23 Sep 2024 20:29:40 +0200 Subject: [PATCH 243/482] keycloak_realm: fix change detection in check mode by normalizing realms beforehand (#8877) * keycloak_realm: fix change detection in check mode by normalizing realms beforehand * add changelog fragment --- 
.../8877-keycloak_realm-sort-lists-before-change-detection.yaml | 2 ++ plugins/modules/keycloak_realm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml diff --git a/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml b/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml new file mode 100644 index 0000000000..3e19866289 --- /dev/null +++ b/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). \ No newline at end of file diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 6128c9e4c7..9bbcdb6b1a 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -803,7 +803,7 @@ def main(): if module._diff: result['diff'] = dict(before=sanitize_cr(before_norm), after=sanitize_cr(desired_norm)) - result['changed'] = (before_realm != desired_realm) + result['changed'] = (before_norm != desired_norm) module.exit_json(**result) From a32f1d699bffac90a4cbe11cb5c11a0851a6666e Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:32:39 +0300 Subject: [PATCH 244/482] ipa_hostgroup: fix state params (#8900) * Fix ipa_hostgroup * Add CHANGELOG fragment --- changelogs/fragments/8900-ipa-hostgroup-fix-states.yml | 2 ++ plugins/modules/ipa_hostgroup.py | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8900-ipa-hostgroup-fix-states.yml diff --git a/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml b/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml new file mode 100644 index 0000000000..c7347e879f --- /dev/null +++ 
b/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml @@ -0,0 +1,2 @@ +bugfixes: + - ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900). diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py index 70749c35b3..9e6abf32aa 100644 --- a/plugins/modules/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -57,13 +57,14 @@ options: state: description: - State to ensure. + - V("absent") and V("disabled") give the same results. + - V("present") and V("enabled") give the same results. default: "present" choices: ["absent", "disabled", "enabled", "present"] type: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes - ''' EXAMPLES = r''' @@ -160,7 +161,7 @@ def ensure(module, client): module_hostgroup = get_hostgroup_dict(description=module.params['description']) changed = False - if state == 'present': + if state in ['present', 'enabled']: if not ipa_hostgroup: changed = True if not module.check_mode: From 293021c3dd3a7c9287bfb2fb1d4ace0a6264e7a8 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 24 Sep 2024 13:39:39 +0300 Subject: [PATCH 245/482] Add stable-2.18 to CI (#8910) Add stable-2.18 to CI. 
--- .azure-pipelines/azure-pipelines.yml | 159 +++++++++++++++++++-------- README.md | 2 +- tests/sanity/ignore-2.19.txt | 15 +++ tests/sanity/ignore-2.19.txt.license | 3 + 4 files changed, 133 insertions(+), 46 deletions(-) create mode 100644 tests/sanity/ignore-2.19.txt create mode 100644 tests/sanity/ignore-2.19.txt.license diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8db5107f4c..14fd4fd058 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -73,6 +73,19 @@ stages: - test: 3 - test: 4 - test: extra + - stage: Sanity_2_18 + displayName: Sanity 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.18/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 - stage: Sanity_2_17 displayName: Sanity 2.17 dependsOn: [] @@ -128,6 +141,17 @@ stages: - test: '3.11' - test: '3.12' - test: '3.13' + - stage: Units_2_18 + displayName: Units 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.18/units/{0}/1 + targets: + - test: 3.8 + - test: "3.13" - stage: Units_2_17 displayName: Units 2.17 dependsOn: [] @@ -200,6 +224,20 @@ stages: - 1 - 2 - 3 + - stage: Remote_2_18 + displayName: Remote 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.18/{0} + targets: + - name: RHEL 9.4 + test: rhel/9.4 + groups: + - 1 + - 2 + - 3 - stage: Remote_2_17 displayName: Remote 2.17 dependsOn: [] @@ -282,6 +320,20 @@ stages: - 1 - 2 - 3 + - stage: Docker_2_18 + displayName: Docker 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.18/linux/{0} + targets: + - name: Ubuntu 24.04 + test: ubuntu2404 + groups: + - 1 + - 2 + - 3 - stage: Docker_2_17 displayName: Docker 2.17 dependsOn: [] @@ -356,75 +408,92 @@ stages: - 3 ### Generic - - stage: Generic_devel - displayName: Generic 
devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/generic/{0}/1 - targets: - - test: '3.8' - - test: '3.11' - - test: '3.13' - - stage: Generic_2_17 - displayName: Generic 2.17 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.17/generic/{0}/1 - targets: - - test: '3.7' - - test: '3.12' - - stage: Generic_2_16 - displayName: Generic 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.16/generic/{0}/1 - targets: - - test: '2.7' - - test: '3.6' - - test: '3.11' - - stage: Generic_2_15 - displayName: Generic 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.15/generic/{0}/1 - targets: - - test: '3.9' +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. +# - stage: Generic_devel +# displayName: Generic devel +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: devel/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.11' +# - test: '3.13' +# - stage: Generic_2_18 +# displayName: Generic 2.18 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.18/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.13' +# - stage: Generic_2_17 +# displayName: Generic 2.17 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.17/generic/{0}/1 +# targets: +# - test: '3.7' +# - test: '3.12' +# - stage: Generic_2_16 +# displayName: Generic 2.16 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.16/generic/{0}/1 +# targets: +# - test: '2.7' +# - test: '3.6' +# - test: '3.11' +# - stage: Generic_2_15 +# 
displayName: Generic 2.15 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.15/generic/{0}/1 +# targets: +# - test: '3.9' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel + - Sanity_2_18 - Sanity_2_17 - Sanity_2_16 - Sanity_2_15 - Units_devel + - Units_2_18 - Units_2_17 - Units_2_16 - Units_2_15 - Remote_devel_extra_vms - Remote_devel + - Remote_2_18 - Remote_2_17 - Remote_2_16 - Remote_2_15 - Docker_devel + - Docker_2_18 - Docker_2_17 - Docker_2_16 - Docker_2_15 - Docker_community_devel # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - Generic_devel +# - Generic_2_18 # - Generic_2_17 # - Generic_2_16 # - Generic_2_15 diff --git a/README.md b/README.md index 53354b93f9..4edd58edb3 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ For more information about communication, see the [Ansible communication guide]( ## Tested with Ansible -Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. +Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. 
## External requirements diff --git a/tests/sanity/ignore-2.19.txt b/tests/sanity/ignore-2.19.txt new file mode 100644 index 0000000000..806c4c5fcf --- /dev/null +++ b/tests/sanity/ignore-2.19.txt @@ -0,0 +1,15 @@ +plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice +plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt' +plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin +plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen +plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice +plugins/modules/parted.py validate-modules:parameter-state-invalid-choice +plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice +plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' +plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.19.txt.license b/tests/sanity/ignore-2.19.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/sanity/ignore-2.19.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project From 199ba0a1708884e20267011e0b20ff2e4d5e03fa Mon Sep 17 00:00:00 2001 From: Niko 
Ehrenfeuchter Date: Tue, 24 Sep 2024 21:53:16 +0200 Subject: [PATCH 246/482] Fix parameter name (#8913) --- plugins/modules/btrfs_subvolume.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py index 35327bfe02..0aa38bf0e4 100644 --- a/plugins/modules/btrfs_subvolume.py +++ b/plugins/modules/btrfs_subvolume.py @@ -102,40 +102,40 @@ EXAMPLES = r''' - name: Create a @home subvolume under the root subvolume community.general.btrfs_subvolume: name: /@home - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Remove the @home subvolume if it exists community.general.btrfs_subvolume: name: /@home state: absent - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the root subvolume named @ community.general.btrfs_subvolume: name: /@ snapshot_source: / - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the root subvolume and make it the new default subvolume community.general.btrfs_subvolume: name: /@ snapshot_source: / default: Yes - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required community.general.btrfs_subvolume: name: /@snapshots/@2022_06_09 snapshot_source: /@ recursive: True - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Remove the /@ subvolume and recursively delete child subvolumes as required community.general.btrfs_subvolume: name: /@snapshots/@2022_06_09 snapshot_source: /@ recursive: True - device: /dev/vda2 + filesystem_device: /dev/vda2 ''' From 89ad40db4181d88dd4c8e7d68237f7a4a7e61bb4 Mon Sep 17 00:00:00 2001 From: Per Fide Date: Wed, 25 Sep 2024 12:54:46 +0200 Subject: [PATCH 247/482] proxmox inventory: remove duplicated credentials line (#8917) * proxmox inventory: remove duplicated credentials line * fixup! proxmox inventory: remove duplicated credentials line * fixup! 
proxmox inventory: remove duplicated credentials line --- changelogs/fragments/8917-proxmox-clean-auth.yml | 2 ++ plugins/inventory/proxmox.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8917-proxmox-clean-auth.yml diff --git a/changelogs/fragments/8917-proxmox-clean-auth.yml b/changelogs/fragments/8917-proxmox-clean-auth.yml new file mode 100644 index 0000000000..0681f326a6 --- /dev/null +++ b/changelogs/fragments/8917-proxmox-clean-auth.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index edfadfd8ad..d7e2107719 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -275,11 +275,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return self.session def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) if self.proxmox_password: - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) + credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) a = self._get_session() From d356e255e03d3a531597da2db2e526afc5144a9e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 25 Sep 2024 22:02:33 +0300 Subject: [PATCH 248/482] Deprecate hipchat module (#8922) Deprecate hipchat module. 
--- changelogs/fragments/deprecate-hipchat.yml | 2 ++ meta/runtime.yml | 4 ++++ plugins/modules/hipchat.py | 4 ++++ 3 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/deprecate-hipchat.yml diff --git a/changelogs/fragments/deprecate-hipchat.yml b/changelogs/fragments/deprecate-hipchat.yml new file mode 100644 index 0000000000..256991ce3b --- /dev/null +++ b/changelogs/fragments/deprecate-hipchat.yml @@ -0,0 +1,2 @@ +deprecated_features: + - "hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919)." diff --git a/meta/runtime.yml b/meta/runtime.yml index 4f5007b4a4..5d4ed8cb89 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -75,6 +75,10 @@ plugin_routing: deprecation: removal_version: 10.0.0 warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. + hipchat: + deprecation: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. rax_cbs_attachments: tombstone: removal_version: 9.0.0 diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py index 83e253679c..399d5c3bef 100644 --- a/plugins/modules/hipchat.py +++ b/plugins/modules/hipchat.py @@ -17,6 +17,10 @@ description: - Send a message to a Hipchat room, with options to control the formatting. extends_documentation_fragment: - community.general.attributes +deprecated: + removed_in: 11.0.0 + why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + alternative: There is none. 
attributes: check_mode: support: full From 4700accbff826ae9e393dc6c01914a80a85ea2d1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Sep 2024 18:49:15 +1200 Subject: [PATCH 249/482] CmdRunner: missing parameter for get_best_parsable_locale() (#8929) * CmdRunner: missing parameter for get_best_parsable_locale() * add changelog frag --- changelogs/fragments/8929-cmd_runner-bugfix.yml | 2 ++ plugins/module_utils/cmd_runner.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8929-cmd_runner-bugfix.yml diff --git a/changelogs/fragments/8929-cmd_runner-bugfix.yml b/changelogs/fragments/8929-cmd_runner-bugfix.yml new file mode 100644 index 0000000000..2d8e0170f6 --- /dev/null +++ b/changelogs/fragments/8929-cmd_runner-bugfix.yml @@ -0,0 +1,2 @@ +bugfixes: + - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing parameter (https://github.com/ansible-collections/community.general/pull/8929). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 95167a282d..f9d6e98056 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -239,7 +239,7 @@ class CmdRunner(object): self.check_rc = check_rc if force_lang == "auto": try: - self.force_lang = get_best_parsable_locale() + self.force_lang = get_best_parsable_locale(module) except RuntimeWarning: self.force_lang = "C" else: From 0bc5f24863e98d153f89636193bba808b1f54e89 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:30:57 +0300 Subject: [PATCH 250/482] one_service: fix recreation (#8887) * Fix one_service unique creation * Revert empty space * Add CHANGELOG fragment * Update CHANGELOG fragment --- changelogs/fragments/8887-fix-one_service-unique.yml | 2 ++ plugins/modules/one_service.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8887-fix-one_service-unique.yml diff --git a/changelogs/fragments/8887-fix-one_service-unique.yml b/changelogs/fragments/8887-fix-one_service-unique.yml new file mode 100644 index 0000000000..979460b862 --- /dev/null +++ b/changelogs/fragments/8887-fix-one_service-unique.yml @@ -0,0 +1,2 @@ +bugfixes: + - one_service - fix service creation after it was deleted with ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887). 
diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py index 2c89e9b8ad..25ead72c1d 100644 --- a/plugins/modules/one_service.py +++ b/plugins/modules/one_service.py @@ -522,7 +522,7 @@ def create_service_and_operation(module, auth, template_id, service_name, owner_ if unique: service = get_service_by_name(module, auth, service_name) - if not service: + if not service or service["TEMPLATE"]["BODY"]["state"] == "DONE": if not module.check_mode: service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) changed = True @@ -637,7 +637,6 @@ def get_service_id_by_name(module, auth, service_name): def get_connection_info(module): - url = module.params.get('api_url') username = module.params.get('api_username') password = module.params.get('api_password') From 002f13713456f033d2bead3553d425d4cbfb5949 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Sep 2024 21:01:36 +1200 Subject: [PATCH 251/482] 10.0.0: CmdRunner: remove deprecated feature (#8928) * remove deprecated feature * remove deprecated feature from tests as well * Adjust the integration test * add changelog frag --- .../fragments/8928-cmd-runner-10.0.0.yml | 2 ++ plugins/module_utils/cmd_runner.py | 22 ------------------- .../cmd_runner/tasks/test_cmd_echo.yml | 10 ++++++--- .../targets/cmd_runner/vars/main.yml | 21 ++++++------------ .../plugins/module_utils/test_cmd_runner.py | 15 ------------- 5 files changed, 16 insertions(+), 54 deletions(-) create mode 100644 changelogs/fragments/8928-cmd-runner-10.0.0.yml diff --git a/changelogs/fragments/8928-cmd-runner-10.0.0.yml b/changelogs/fragments/8928-cmd-runner-10.0.0.yml new file mode 100644 index 0000000000..bbeb838439 --- /dev/null +++ b/changelogs/fragments/8928-cmd-runner-10.0.0.yml @@ -0,0 +1,2 @@ +breaking_changes: + - cmd_runner module utils - CLI arguments created directly from module parameters are no longer assigned a default 
formatter (https://github.com/ansible-collections/community.general/pull/8928). diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index f9d6e98056..5cd4f6b957 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -10,7 +10,6 @@ import os from functools import wraps from ansible.module_utils.common.collections import is_sequence -from ansible.module_utils.six import iteritems from ansible.module_utils.common.locale import get_best_parsable_locale @@ -167,23 +166,6 @@ class _Format(object): default = [] return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) - @staticmethod - def as_default_type(_type, arg="", ignore_none=None): - # - # DEPRECATION: This method is deprecated and will be removed in community.general 10.0.0 - # - # Instead of using the implicit formats provided here, use the explicit necessary format method. - # - fmt = _Format - if _type == "dict": - return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none) - if _type == "list": - return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none) - if _type == "bool": - return fmt.as_bool("--{0}".format(arg)) - - return fmt.as_opt_val("--{0}".format(arg), ignore_none=ignore_none) - @staticmethod def unpack_args(func): @wraps(func) @@ -252,10 +234,6 @@ class CmdRunner(object): _cmd = self.command[0] self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) - for mod_param_name, spec in iteritems(module.argument_spec): - if mod_param_name not in self.arg_formats: - self.arg_formats[mod_param_name] = _Format.as_default_type(spec.get('type', 'str'), mod_param_name) - @property def binary(self): return self.command[0] diff --git a/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml b/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml 
index a2a9fb8b72..14e1557233 100644 --- a/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml +++ b/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml @@ -3,7 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: create copy of /bin/echo ({{ item.name }}) +- name: Create copy of /bin/echo ({{ item.name }}) ansible.builtin.copy: src: /bin/echo dest: "{{ item.copy_to }}/echo" @@ -11,7 +11,7 @@ remote_src: true when: item.copy_to is defined -- name: test cmd_echo module ({{ item.name }}) +- name: Test cmd_echo module ({{ item.name }}) cmd_echo: cmd: "{{ item.cmd | default(omit) }}" path_prefix: "{{ item.path_prefix | default(omit) }}" @@ -24,6 +24,10 @@ check_mode: "{{ item.check_mode | default(omit) }}" ignore_errors: "{{ item.expect_error | default(omit) }}" -- name: check results ({{ item.name }}) +- name: Debug test results ({{ item.name }}) + ansible.builtin.debug: + var: test_result + +- name: Check results ({{ item.name }}) _unsafe_assert: that: "{{ item.assertions }}" diff --git a/tests/integration/targets/cmd_runner/vars/main.yml b/tests/integration/targets/cmd_runner/vars/main.yml index f9a7153381..40c8d10af6 100644 --- a/tests/integration/targets/cmd_runner/vars/main.yml +++ b/tests/integration/targets/cmd_runner/vars/main.yml @@ -38,22 +38,12 @@ cmd_echo_tests: - test_result.out == "-- --answer=43 --bb-here\n" - test_result.err == "" - - name: implicit aa format - arg_formats: - bb: - func: as_bool - args: [--bb-here] - arg_order: ['aa', 'bb'] - arg_values: - bb: true - aa: 1984 - assertions: - - test_result.rc == 0 - - test_result.out == "-- --aa 1984 --bb-here\n" - - test_result.err == "" - - name: missing bb format arg_order: ['aa', 'bb'] + arg_formats: + aa: + func: as_opt_eq_val + args: [--answer] arg_values: bb: true aa: 1984 @@ -69,6 +59,9 @@ cmd_echo_tests: - name: missing bb value arg_formats: + aa: + func: 
as_opt_eq_val + args: [--answer] bb: func: as_bool args: [--bb-here] diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index da93292197..50d0a70094 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -6,7 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from sys import version_info from functools import partial import pytest @@ -40,10 +39,6 @@ TC_FORMATS = dict( simple_list_max_len_ok=(partial(cmd_runner_fmt.as_list, max_len=1), 42, ["42"], None), simple_list_max_len_fail=(partial(cmd_runner_fmt.as_list, max_len=2), [42, 42, 42], None, ValueError), simple_map=(partial(cmd_runner_fmt.as_map, {'a': 1, 'b': 2, 'c': 3}), 'b', ["2"], None), - simple_default_type__list=(partial(cmd_runner_fmt.as_default_type, "list"), [1, 2, 3, 5, 8], ["--1", "--2", "--3", "--5", "--8"], None), - simple_default_type__bool_true=(partial(cmd_runner_fmt.as_default_type, "bool", "what"), True, ["--what"], None), - simple_default_type__bool_false=(partial(cmd_runner_fmt.as_default_type, "bool", "what"), False, [], None), - simple_default_type__potato=(partial(cmd_runner_fmt.as_default_type, "any-other-type", "potato"), "42", ["--potato", "42"], None), simple_fixed_true=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), True, ["--always-here", "--forever"], None), simple_fixed_false=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), False, ["--always-here", "--forever"], None), simple_fixed_none=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), None, ["--always-here", "--forever"], None), @@ -52,16 +47,6 @@ TC_FORMATS = dict( stack_opt_val__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val), "-t"), ["potatoes", "bananas"], ["-t", "potatoes", "-t", "bananas"], None), stack_opt_eq_val__int=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_eq_val), 
"--answer"), [42, 17], ["--answer=42", "--answer=17"], None), ) -if tuple(version_info) >= (3, 1): - from collections import OrderedDict - - # needs OrderedDict to provide a consistent key order - TC_FORMATS["simple_default_type__dict"] = ( # type: ignore - partial(cmd_runner_fmt.as_default_type, "dict"), - OrderedDict((('a', 1), ('b', 2))), - ["--a=1", "--b=2"], - None - ) TC_FORMATS_IDS = sorted(TC_FORMATS.keys()) From bc6ae849b3cb8fbb307279407b2a1471a29370bc Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 26 Sep 2024 12:14:09 +0300 Subject: [PATCH 252/482] Move ansible-core 2.15 tests to EOL tests (#8933) Move ansible-core 2.15 tests to EOL tests. --- .azure-pipelines/azure-pipelines.yml | 77 +--------------------------- .github/workflows/ansible-test.yml | 23 ++++++++- 2 files changed, 24 insertions(+), 76 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 14fd4fd058..97f76b3ba9 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -112,19 +112,6 @@ stages: - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_15 - displayName: Sanity 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.15/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 ### Units - stage: Units_devel displayName: Units devel @@ -175,17 +162,6 @@ stages: - test: 2.7 - test: 3.6 - test: "3.11" - - stage: Units_2_15 - displayName: Units 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.15/units/{0}/1 - targets: - - test: 3.5 - - test: "3.10" ## Remote - stage: Remote_devel_extra_vms @@ -270,30 +246,10 @@ stages: test: rhel/9.2 - name: RHEL 8.8 test: rhel/8.8 - # - name: FreeBSD 13.2 - # test: freebsd/13.2 - groups: - - 1 - - 2 - - 3 - - stage: Remote_2_15 - displayName: Remote 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - 
parameters: - testFormat: 2.15/{0} - targets: - - name: RHEL 9.1 - test: rhel/9.1 - - name: RHEL 8.7 - test: rhel/8.7 - name: RHEL 7.9 test: rhel/7.9 - # - name: FreeBSD 13.1 - # test: freebsd/13.1 - # - name: FreeBSD 12.4 - # test: freebsd/12.4 + # - name: FreeBSD 13.2 + # test: freebsd/13.2 groups: - 1 - 2 @@ -366,20 +322,6 @@ stages: test: opensuse15 - name: Alpine 3 test: alpine3 - groups: - - 1 - - 2 - - 3 - - stage: Docker_2_15 - displayName: Docker 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.15/linux/{0} - targets: - - name: Fedora 37 - test: fedora37 - name: CentOS 7 test: centos7 groups: @@ -455,16 +397,6 @@ stages: # - test: '2.7' # - test: '3.6' # - test: '3.11' -# - stage: Generic_2_15 -# displayName: Generic 2.15 -# dependsOn: [] -# jobs: -# - template: templates/matrix.yml -# parameters: -# nameFormat: Python {0} -# testFormat: 2.15/generic/{0}/1 -# targets: -# - test: '3.9' - stage: Summary condition: succeededOrFailed() @@ -473,29 +405,24 @@ stages: - Sanity_2_18 - Sanity_2_17 - Sanity_2_16 - - Sanity_2_15 - Units_devel - Units_2_18 - Units_2_17 - Units_2_16 - - Units_2_15 - Remote_devel_extra_vms - Remote_devel - Remote_2_18 - Remote_2_17 - Remote_2_16 - - Remote_2_15 - Docker_devel - Docker_2_18 - Docker_2_17 - Docker_2_16 - - Docker_2_15 - Docker_community_devel # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - Generic_devel # - Generic_2_18 # - Generic_2_17 # - Generic_2_16 -# - Generic_2_15 jobs: - template: templates/coverage.yml diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index e57213e9fa..89a3006f56 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -31,6 +31,7 @@ jobs: ansible: - '2.13' - '2.14' + - '2.15' # Ansible-test on various stable branches does not yet work well with cgroups v2. 
# Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 # image for these stable branches. The list of branches where this is necessary will @@ -76,6 +77,10 @@ jobs: python: '3.8' - ansible: '2.14' python: '3.9' + - ansible: '2.15' + python: '3.5' + - ansible: '2.15' + python: '3.10' steps: - name: >- @@ -166,16 +171,32 @@ jobs: docker: alpine3 python: '' target: azp/posix/3/ + # 2.15 + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/1/ + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/2/ + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/3/ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - ansible: '2.13' # docker: default # python: '3.9' # target: azp/generic/1/ - # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - ansible: '2.14' # docker: default # python: '3.10' # target: azp/generic/1/ + # - ansible: '2.15' + # docker: default + # python: '3.9' + # target: azp/generic/1/ steps: - name: >- From 84e0190eee332ffba6e79132c2a78483f283b659 Mon Sep 17 00:00:00 2001 From: Pierre-yves Fontaniere Date: Fri, 27 Sep 2024 21:30:54 +0200 Subject: [PATCH 253/482] Disk description now contains a StorageId and a RedfishURI (#8937) * Disks controller is now uniquely identified by the controller ID * Fix typo `StorageID` to `StorageId` * Add changelog fragments --------- Co-authored-by: Pierre-yves FONTANIERE --- .../fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml | 2 ++ plugins/module_utils/redfish_utils.py | 3 +++ 2 files changed, 5 insertions(+) create mode 100644 changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml diff --git a/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml b/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml new file mode 100644 index 0000000000..6b66918234 --- /dev/null +++ 
b/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). \ No newline at end of file diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c1efd00b70..102d826e6d 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -866,6 +866,7 @@ class RedfishUtils(object): return response data = response['data'] controller_name = 'Controller 1' + storage_id = data['Id'] if 'Controllers' in data: controllers_uri = data['Controllers'][u'@odata.id'] @@ -900,6 +901,7 @@ class RedfishUtils(object): data = response['data'] drive_result = {} + drive_result['RedfishURI'] = data['@odata.id'] for property in properties: if property in data: if data[property] is not None: @@ -911,6 +913,7 @@ class RedfishUtils(object): drive_result[property] = data[property] drive_results.append(drive_result) drives = {'Controller': controller_name, + 'StorageId': storage_id, 'Drives': drive_results} result["entries"].append(drives) From fe18b05f08d37724ed7b20c6f3076cee7257330a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 28 Sep 2024 08:11:21 +0300 Subject: [PATCH 254/482] 10.0.0: remove deprecated features (#8918) * Remove pool option from redhat_subscription. * Remove proxmox_default_behavior option from proxmox_kvm. * Remove the logging option from ejabberd_user. * Remove the ack_params_state_absent option from consul. * irc: change defaults of use_tls and validate_certs to true. * rhsm_repository: remove states present and absent. * Re-add 'using'. * Fix typo. 
--- changelogs/fragments/deprecations.yml | 8 + plugins/modules/consul.py | 9 -- plugins/modules/ejabberd_user.py | 16 +- plugins/modules/irc.py | 33 +---- plugins/modules/proxmox_kvm.py | 73 +-------- plugins/modules/redhat_subscription.py | 139 +----------------- plugins/modules/rhsm_repository.py | 18 +-- .../targets/proxmox/tasks/main.yml | 1 - .../modules/test_redhat_subscription.py | 67 --------- .../plugins/modules/test_rhsm_repository.py | 33 ----- 10 files changed, 29 insertions(+), 368 deletions(-) create mode 100644 changelogs/fragments/deprecations.yml diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml new file mode 100644 index 0000000000..c8f4f6150a --- /dev/null +++ b/changelogs/fragments/deprecations.yml @@ -0,0 +1,8 @@ +removed_features: + - "redhat_subscriptions - removed the ``pool`` option. Use ``pool_ids`` instead (https://github.com/ansible-collections/community.general/pull/8918)." + - "proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly specify the old default values if you were using ``proxmox_default_behavior=compatibility``, otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918)." + - "ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918)." + - "consul - removed the ``ack_params_state_absent`` option. It had no effect anymore (https://github.com/ansible-collections/community.general/pull/8918)." +breaking_changes: + - "irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/8918)." + - "rhsm_repository - the states ``present`` and ``absent`` have been removed. Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918)." 
diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py index fe1a898835..28beeec52d 100644 --- a/plugins/modules/consul.py +++ b/plugins/modules/consul.py @@ -170,10 +170,6 @@ options: type: str description: - The token key identifying an ACL rule set. May be required to register services. - ack_params_state_absent: - type: bool - description: - - This parameter has no more effect and is deprecated. It will be removed in community.general 10.0.0. ''' EXAMPLES = ''' @@ -598,11 +594,6 @@ def main(): timeout=dict(type='str'), tags=dict(type='list', elements='str'), token=dict(no_log=True), - ack_params_state_absent=dict( - type='bool', - removed_in_version='10.0.0', - removed_from_collection='community.general', - ), ), mutually_exclusive=[ ('script', 'ttl', 'tcp', 'http'), diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py index d0b575e1cd..b43e078a5d 100644 --- a/plugins/modules/ejabberd_user.py +++ b/plugins/modules/ejabberd_user.py @@ -41,12 +41,6 @@ options: description: - the password to assign to the username required: false - logging: - description: - - enables or disables the local syslog facility for this module - required: false - default: false - type: bool state: type: str description: @@ -75,8 +69,6 @@ EXAMPLES = ''' state: absent ''' -import syslog - from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt @@ -91,7 +83,6 @@ class EjabberdUser(object): def __init__(self, module): self.module = module - self.logging = module.params.get('logging') self.state = module.params.get('state') self.host = module.params.get('host') self.user = module.params.get('username') @@ -125,10 +116,8 @@ class EjabberdUser(object): return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc))) def log(self, entry): - """ This method will log information to the local syslog facility """ - if self.logging: 
- syslog.openlog('ansible-%s' % self.module._name) - syslog.syslog(syslog.LOG_NOTICE, entry) + """ This method does nothing """ + pass def run_command(self, cmd, options, process=None): """ This method will run the any command specified and return the @@ -169,7 +158,6 @@ def main(): username=dict(required=True, type='str'), password=dict(type='str', no_log=True), state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, type='bool', removed_in_version='10.0.0', removed_from_collection='community.general'), ), required_if=[ ('state', 'present', ['password']), diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py index e40ba2d0ba..748479e87b 100644 --- a/plugins/modules/irc.py +++ b/plugins/modules/irc.py @@ -85,10 +85,9 @@ options: was exlusively called O(use_ssl). The latter is now an alias of O(use_tls). - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. - - The option currently defaults to V(false). The default has been B(deprecated) and will - change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly - set this option to a value (preferably V(true)). + - The default of this option changed to V(true) in community.general 10.0.0. type: bool + default: true aliases: - use_ssl part: @@ -110,10 +109,9 @@ options: if the network between between Ansible and the IRC server is known to be safe. - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. - - The option currently defaults to V(false). The default has been B(deprecated) and will - change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly - set this option to a value (preferably V(true)). + - The default of this option changed to V(true) in community.general 10.0.0. 
type: bool + default: true version_added: 8.1.0 # informational: requirements for nodes @@ -313,8 +311,8 @@ def main(): passwd=dict(no_log=True), timeout=dict(type='int', default=30), part=dict(type='bool', default=True), - use_tls=dict(type='bool', aliases=['use_ssl']), - validate_certs=dict(type='bool'), + use_tls=dict(type='bool', default=True, aliases=['use_ssl']), + validate_certs=dict(type='bool', default=True), ), supports_check_mode=True, required_one_of=[['channel', 'nick_to']] @@ -338,25 +336,6 @@ def main(): style = module.params["style"] validate_certs = module.params["validate_certs"] - if use_tls is None: - module.deprecate( - 'The default of use_tls will change to true in community.general 10.0.0.' - ' Set a value now (preferably true, if possible) to avoid the deprecation warning.', - version='10.0.0', - collection_name='community.general', - ) - use_tls = False - - if validate_certs is None: - if use_tls: - module.deprecate( - 'The default of validate_certs will change to true in community.general 10.0.0.' - ' Set a value now (prefarably true, if possible) to avoid the deprecation warning.', - version='10.0.0', - collection_name='community.general', - ) - validate_certs = False - try: send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style) except Exception as e: diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 771ddd902f..cac3496228 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -14,7 +14,7 @@ module: proxmox_kvm short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster description: - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. - - Since community.general 4.0.0 on, there are no more default values, see O(proxmox_default_behavior). + - Since community.general 4.0.0 on, there are no more default values. 
author: "Abdoul Bah (@helldorado) " attributes: check_mode: @@ -32,7 +32,6 @@ options: acpi: description: - Specify if ACPI should be enabled/disabled. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true). type: bool agent: description: @@ -44,19 +43,15 @@ options: description: - Pass arbitrary arguments to kvm. - This option is for experts only! - - If O(proxmox_default_behavior) is set to V(compatibility), this option has a default of - V(-serial unix:/var/run/qemu-server/.serial,server,nowait). type: str autostart: description: - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). type: bool balloon: description: - Specify the amount of RAM for the VM in MB. - Using zero disables the balloon driver. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0). type: int bios: description: @@ -68,7 +63,6 @@ options: - Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n). - For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0). - You can combine to set order. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(cnd). type: str bootdisk: description: @@ -104,12 +98,10 @@ options: cores: description: - Specify number of cores per socket. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1). type: int cpu: description: - Specify emulated CPU type. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(kvm64). type: str cpulimit: description: @@ -120,7 +112,6 @@ options: description: - Specify CPU weight for a VM. 
- You can disable fair-scheduler configuration by setting this to 0 - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000). type: int delete: description: @@ -173,7 +164,6 @@ options: description: - Allow to force stop VM. - Can be used with states V(stopped), V(restarted), and V(absent). - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). - Requires parameter O(archive). type: bool format: @@ -184,8 +174,7 @@ options: - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(qcow2). - If O(proxmox_default_behavior) is set to V(no_defaults), not specifying this option is equivalent to setting it to V(unspecified). + - Not specifying this option is equivalent to setting it to V(unspecified). type: str choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ] freeze: @@ -257,7 +246,6 @@ options: kvm: description: - Enable/disable KVM hardware virtualization. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true). type: bool localtime: description: @@ -277,7 +265,6 @@ options: memory: description: - Memory size in MB for instance. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512). type: int migrate: description: @@ -340,13 +327,11 @@ options: onboot: description: - Specifies whether a VM will be started during system bootup. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true). 
type: bool ostype: description: - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. - The l26 is Linux 2.6/3.X Kernel. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(l26). type: str choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris'] parallel: @@ -439,7 +424,6 @@ options: sockets: description: - Sets the number of CPU sockets. (1 - N). - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1). type: int sshkeys: description: @@ -472,7 +456,6 @@ options: tablet: description: - Enables/disables the USB tablet device. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). type: bool tags: description: @@ -494,7 +477,6 @@ options: template: description: - Enables/disables the template. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). type: bool timeout: description: @@ -553,7 +535,6 @@ options: vga: description: - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'. - - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(std). type: str choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4'] virtio: @@ -571,24 +552,6 @@ options: description: - Creates a virtual hardware watchdog device. type: str - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. 
- - The value V(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is V(no_defaults), - which makes sure these options have no defaults. - - This affects the O(acpi), O(autostart), O(balloon), O(boot), O(cores), O(cpu), - O(cpuunits), O(force), O(format), O(kvm), O(memory), O(onboot), O(ostype), O(sockets), - O(tablet), O(template), and O(vga) options. - - This option is deprecated and will be removed in community.general 10.0.0. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" seealso: - module: community.general.proxmox_vm_info extends_documentation_fragment: @@ -1143,10 +1106,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): kwargs['tags'] = ",".join(kwargs['tags']) # -args and skiplock require root@pam user - but can not use api tokens - if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None: - if not update and self.module.params['proxmox_default_behavior'] == 'compatibility': - kwargs['args'] = vm_args - elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None: + if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None: kwargs['args'] = self.module.params['args'] elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None: self.module.fail_json(msg='args parameter require root@pam user. 
') @@ -1330,11 +1290,6 @@ def main(): virtio=dict(type='dict'), vmid=dict(type='int'), watchdog=dict(), - proxmox_default_behavior=dict(type='str', - default='no_defaults', - choices=['compatibility', 'no_defaults'], - removed_from_collection='community.general', - removed_in_version='10.0.0'), ) module_args.update(kvm_args) @@ -1363,28 +1318,6 @@ def main(): vmid = module.params['vmid'] validate_certs = module.params['validate_certs'] - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - acpi=True, - autostart=False, - balloon=0, - boot='cnd', - cores=1, - cpu='kvm64', - cpuunits=1000, - format='qcow2', - kvm=True, - memory=512, - ostype='l26', - sockets=1, - tablet=False, - template=False, - vga='std', - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - if module.params['format'] == 'unspecified': module.params['format'] = None diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py index 4a7aac483e..338fb92ebd 100644 --- a/plugins/modules/redhat_subscription.py +++ b/plugins/modules/redhat_subscription.py @@ -39,7 +39,7 @@ notes: - Since community.general 6.5.0, credentials (that is, O(username) and O(password), O(activationkey), or O(token)) are needed only in case the the system is not registered, or O(force_register) is specified; this makes it possible to use the module to tweak an - already registered system, for example attaching pools to it (using O(pool), or O(pool_ids)), + already registered system, for example attaching pools to it (using O(pool_ids)), and modifying the C(syspurpose) attributes (using O(syspurpose)). requirements: - subscription-manager @@ -138,29 +138,14 @@ options: description: - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello type: str - pool: - description: - - | - Specify a subscription pool name to consume. 
Regular expressions accepted. - Mutually exclusive with O(pool_ids). - - | - Please use O(pool_ids) instead: specifying pool IDs is much faster, - and it avoids to match new pools that become available for the - system and are not explicitly wanted. Also, this option does not - support quantities. - - | - This option is deprecated for the reasons mentioned above, - and it will be removed in community.general 10.0.0. - default: '^$' - type: str pool_ids: description: - | - Specify subscription pool IDs to consume. Prefer over O(pool) when possible as it is much faster. + Specify subscription pool IDs to consume. A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), or as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple - entitlements from a pool (the pool must support this). Mutually exclusive with O(pool). + entitlements from a pool (the pool must support this). default: [] type: list elements: raw @@ -261,20 +246,6 @@ EXAMPLES = ''' password: somepass consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' - -- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^Red Hat Enterprise Server$' - - name: Register as user credentials into given environment (against Red Hat Satellite or Katello), and auto-subscribe. 
community.general.redhat_subscription: state: present @@ -783,42 +754,6 @@ class Rhsm(object): self.update_plugin_conf('rhnplugin', False) self.update_plugin_conf('subscription-manager', False) - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression. It matches regexp against available pool ids first. - If any pool ids match, subscribe to those pools and return. - - If no pool ids match, then match regexp against available pool product - names. Note this can still easily match many many pools. Then subscribe - to those pools. - - Since a pool id is a more specific match, we only fallback to matching - against names if we didn't match pool ids. - - Raises: - * Exception - if error occurs while running command - ''' - # See https://github.com/ansible/ansible/issues/19466 - - # subscribe to pools whose pool id matches regexp (and only the pool id) - subscribed_pool_ids = self.subscribe_pool(regexp) - - # If we found any matches, we are done - # Don't attempt to match pools by product name - if subscribed_pool_ids: - return subscribed_pool_ids - - # We didn't match any pool ids. - # Now try subscribing to pools based on product name match - # Note: This can match lots of product names. 
- subscribed_by_product_pool_ids = self.subscribe_product(regexp) - if subscribed_by_product_pool_ids: - return subscribed_by_product_pool_ids - - # no matches - return [] - def subscribe_by_pool_ids(self, pool_ids): """ Try to subscribe to the list of pool IDs @@ -837,56 +772,6 @@ class Rhsm(object): self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id) return pool_ids - def subscribe_pool(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_pools(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def subscribe_product(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_products(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def update_subscriptions(self, regexp): - changed = False - consumed_pools = RhsmPools(self.module, consumed=True) - pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)] - pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)]) - - serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] - serials = self.unsubscribe(serials=serials_to_remove) - - subscribed_pool_ids = self.subscribe(regexp) - - if subscribed_pool_ids or serials: - changed = True - return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, - 
'unsubscribed_serials': serials} - def update_subscriptions_by_pool_ids(self, pool_ids): changed = False consumed_pools = RhsmPools(self.module, consumed=True) @@ -1109,11 +994,6 @@ def main(): 'activationkey': {'no_log': True}, 'org_id': {}, 'environment': {}, - 'pool': { - 'default': '^$', - 'removed_in_version': '10.0.0', - 'removed_from_collection': 'community.general', - }, 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, 'consumer_type': {}, 'consumer_name': {}, @@ -1144,8 +1024,7 @@ def main(): ['token', 'username'], ['activationkey', 'consumer_id'], ['activationkey', 'environment'], - ['activationkey', 'auto_attach'], - ['pool', 'pool_ids']], + ['activationkey', 'auto_attach']], required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]], ) @@ -1173,7 +1052,6 @@ def main(): if activationkey and not org_id: module.fail_json(msg='org_id is required when using activationkey') environment = module.params['environment'] - pool = module.params['pool'] pool_ids = {} for value in module.params['pool_ids']: if isinstance(value, dict): @@ -1217,12 +1095,9 @@ def main(): rhsm.sync_syspurpose() except Exception as e: module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) - if pool != '^$' or pool_ids: + if pool_ids: try: - if pool_ids: - result = rhsm.update_subscriptions_by_pool_ids(pool_ids) - else: - result = rhsm.update_subscriptions(pool) + result = rhsm.update_subscriptions_by_pool_ids(pool_ids) except Exception as e: module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) else: @@ -1245,8 +1120,6 @@ def main(): rhsm.sync_syspurpose() if pool_ids: subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) - elif pool != '^$': - subscribed_pool_ids = rhsm.subscribe(pool) else: subscribed_pool_ids = [] except Exception as e: diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py index e58389102e..ed8b0e7d58 100644 --- 
a/plugins/modules/rhsm_repository.py +++ b/plugins/modules/rhsm_repository.py @@ -36,11 +36,9 @@ options: description: - If state is equal to present or disabled, indicates the desired repository state. - - | - Please note that V(present) and V(absent) are deprecated, and will be - removed in community.general 10.0.0; please use V(enabled) and - V(disabled) instead. - choices: [present, enabled, absent, disabled] + - In community.general 10.0.0 the states V(present) and V(absent) have been + removed. Please use V(enabled) and V(disabled) instead. + choices: [enabled, disabled] default: "enabled" type: str name: @@ -240,7 +238,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='list', elements='str', required=True), - state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'), + state=dict(choices=['enabled', 'disabled'], default='enabled'), purge=dict(type='bool', default=False), ), supports_check_mode=True, @@ -257,14 +255,6 @@ def main(): state = module.params['state'] purge = module.params['purge'] - if state in ['present', 'absent']: - replacement = 'enabled' if state == 'present' else 'disabled' - module.deprecate( - 'state=%s is deprecated; please use state=%s instead' % (state, replacement), - version='10.0.0', - collection_name='community.general', - ) - repository_modify(module, rhsm, state, name, purge) diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml index 1b529d1112..1ce9767b70 100644 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ b/tests/integration/targets/proxmox/tasks/main.yml @@ -585,7 +585,6 @@ api_token_id: "{{ api_token_id | default(omit) }}" api_token_secret: "{{ api_token_secret | default(omit) }}" validate_certs: "{{ validate_certs }}" - proxmox_default_behavior: "no_defaults" node: "{{ node }}" vmid: "{{ vmid }}" state: absent diff --git a/tests/unit/plugins/modules/test_redhat_subscription.py 
b/tests/unit/plugins/modules/test_redhat_subscription.py index 9473d0d46f..7be3740d26 100644 --- a/tests/unit/plugins/modules/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/test_redhat_subscription.py @@ -432,73 +432,6 @@ TEST_CASES = [ 'msg': "System successfully registered to 'None'." } ], - # Test of registration using username and password and attach to pool - [ - { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'pool': 'ff8080816b8e967f016b8e99632804a6' - }, - { - 'id': 'test_registeration_username_password_pool', - 'run_command.calls': [ - ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') - ), - ( - [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' - ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ), - ( - [ - 'subscription-manager list --available', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, - ''' -+-------------------------------------------+ - Available Subscriptions -+-------------------------------------------+ -Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server) -Provides: SP Server Bits -SKU: sp-server-prem-prod -Contract: 0 -Pool ID: ff8080816b8e967f016b8e99632804a6 -Provides Management: Yes -Available: 5 -Suggested: 1 -Service Type: L1-L3 -Roles: SP Server -Service Level: Premium -Usage: Production -Add-ons: -Subscription Type: Standard -Starts: 06/25/19 -Ends: 06/24/20 -Entitlement Type: Physical -''', ''), - ] - ), - ( - 'subscription-manager attach --pool ff8080816b8e967f016b8e99632804a6', - {'check_rc': True}, - (0, '', '') - ) - ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } - ], # Test of registration using username and password and attach to pool ID and quantities [ { diff --git a/tests/unit/plugins/modules/test_rhsm_repository.py b/tests/unit/plugins/modules/test_rhsm_repository.py index e822c7e844..b73b43b4c5 100644 --- a/tests/unit/plugins/modules/test_rhsm_repository.py +++ b/tests/unit/plugins/modules/test_rhsm_repository.py @@ -648,39 +648,6 @@ TEST_CASES = [ 'repositories': REPOS.copy().disable('awesomeos-99000'), } ], - # disable an enabled repository (using state=absent) - [ - { - 'name': 'awesomeos-99000', - 'state': 'absent', - }, - { - 'id': 'test_disable_single_using_absent', - 'run_command.calls': [ - ( - [ - '/testbin/subscription-manager', - 'repos', - '--list', - ], - SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') - ), - ( - [ - '/testbin/subscription-manager', - 'repos', - '--disable', - 'awesomeos-99000', - ], - SUBMAN_KWARGS, - (0, '', '') - ), - ], - 'changed': True, - 'repositories': REPOS.copy().disable('awesomeos-99000'), - } - ], # disable an already disabled repository [ { From 8ef77d8664598154fdd51bf522c0afc62fe36b65 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 28 Sep 2024 17:17:36 +1200 Subject: [PATCH 255/482] unit test helper: big revamp (#8894) * initial commit * multiple changes: - TestCaseContext fixture no longer need to autouse=True - Helper.from_module() allows extra param to specify yaml file - test_django_check: adjusted .py and .yaml * set fixtures per testcase * set fixtures per testcase * rollback to original state * patch_ansible_module fixture - now it works not only in parametrized functions but also directly with args * tests/unit/plugins/modules/helper.py - improved encapsulation, class Helper no longer knows details about test cases - test functions no longer parametrized, that allows using test case fixtures per test function - renamed 'context' to 'mock' * enable Helper.from_list(), better param name 'ansible_module' * adjusted 
test fiels to new helper * remove unnecessary .license file * fix bracket * fix reference name * Update tests/unit/plugins/modules/helper.py Co-authored-by: Felix Fontein * revert to parametrized test func instead of multiple funcs --------- Co-authored-by: Felix Fontein --- tests/unit/plugins/modules/conftest.py | 38 +- tests/unit/plugins/modules/helper.py | 330 ++++++++++-------- tests/unit/plugins/modules/test_cpanm.py | 2 +- tests/unit/plugins/modules/test_cpanm.yaml | 54 ++- .../unit/plugins/modules/test_django_check.py | 2 +- .../plugins/modules/test_django_check.yaml | 10 +- .../plugins/modules/test_django_command.py | 2 +- .../plugins/modules/test_django_command.yaml | 22 +- .../modules/test_django_createcachetable.py | 2 +- .../modules/test_django_createcachetable.yaml | 5 +- .../unit/plugins/modules/test_facter_facts.py | 2 +- .../plugins/modules/test_facter_facts.yaml | 14 +- tests/unit/plugins/modules/test_gconftool2.py | 2 +- .../unit/plugins/modules/test_gconftool2.yaml | 19 +- .../plugins/modules/test_gconftool2_info.py | 2 +- .../plugins/modules/test_gconftool2_info.yaml | 8 +- tests/unit/plugins/modules/test_gio_mime.py | 2 +- tests/unit/plugins/modules/test_gio_mime.yaml | 9 +- tests/unit/plugins/modules/test_opkg.py | 2 +- tests/unit/plugins/modules/test_opkg.yaml | 15 +- tests/unit/plugins/modules/test_puppet.py | 2 +- tests/unit/plugins/modules/test_puppet.yaml | 235 +++++++------ tests/unit/plugins/modules/test_snap.py | 176 +++++----- tests/unit/plugins/modules/test_xfconf.py | 2 +- tests/unit/plugins/modules/test_xfconf.yaml | 100 +++--- .../unit/plugins/modules/test_xfconf_info.py | 2 +- .../plugins/modules/test_xfconf_info.yaml | 30 +- 27 files changed, 611 insertions(+), 478 deletions(-) diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index 9504c2336d..6e96c58316 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -16,22 +16,34 @@ from 
ansible.module_utils.common._collections_compat import MutableMapping from ansible_collections.community.general.plugins.module_utils import deps -@pytest.fixture -def patch_ansible_module(request, mocker): - if isinstance(request.param, string_types): - args = request.param - elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False - args = json.dumps(request.param) +def fix_ansible_args(args): + if isinstance(args, string_types): + return args + + if isinstance(args, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in args: + args = {'ANSIBLE_MODULE_ARGS': args} + if '_ansible_remote_tmp' not in args['ANSIBLE_MODULE_ARGS']: + args['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args['ANSIBLE_MODULE_ARGS']: + args['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + args = json.dumps(args) + return args + else: raise Exception('Malformed data to the patch_ansible_module pytest fixture') - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + +@pytest.fixture +def patch_ansible_module(request, mocker): + if hasattr(request, "param"): + args = fix_ansible_args(request.param) + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + else: + def _patch(args): + args = fix_ansible_args(args) + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + return _patch @pytest.fixture(autouse=True) diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/helper.py index e012980afe..0626e39f1c 100644 --- a/tests/unit/plugins/modules/helper.py +++ 
b/tests/unit/plugins/modules/helper.py @@ -8,75 +8,221 @@ __metaclass__ = type import sys import json -from collections import namedtuple -import pytest import yaml +import pytest -ModuleTestCase = namedtuple("ModuleTestCase", ["id", "input", "output", "run_command_calls", "flags"]) -RunCmdCall = namedtuple("RunCmdCall", ["command", "environ", "rc", "out", "err"]) +class Helper(object): + @staticmethod + def from_list(test_module, ansible_module, test_cases): + helper = Helper(test_module, ansible_module, test_cases=test_cases) + return helper + + @staticmethod + def from_file(test_module, ansible_module, filename): + with open(filename, "r") as test_cases: + test_cases_data = yaml.safe_load(test_cases) + return Helper.from_list(test_module, ansible_module, test_cases_data) + + @staticmethod + def from_module(ansible_module, test_module_name, test_spec=None): + test_module = sys.modules[test_module_name] + if test_spec is None: + test_spec = test_module.__file__.replace('.py', '.yaml') + return Helper.from_file(test_module, ansible_module, test_spec) + + def add_func_to_test_module(self, name, func): + setattr(self.test_module, name, func) + + def __init__(self, test_module, ansible_module, test_cases): + self.test_module = test_module + self.ansible_module = ansible_module + self.test_cases = [] + self.fixtures = {} + + for test_case in test_cases: + tc = ModuleTestCase.make_test_case(test_case, test_module) + self.test_cases.append(tc) + self.fixtures.update(tc.fixtures) + self.set_test_func() + self.set_fixtures(self.fixtures) + + @property + def runner(self): + return Runner(self.ansible_module.main) + + def set_test_func(self): + @pytest.mark.parametrize('test_case', self.test_cases, ids=[tc.id for tc in self.test_cases]) + @pytest.mark.usefixtures(*self.fixtures) + def _test_module(mocker, capfd, patch_ansible_module, test_case): + """ + Run unit tests for each test case in self.test_cases + """ + patch_ansible_module(test_case.input) + 
self.runner.run(mocker, capfd, test_case) + + self.add_func_to_test_module("test_module", _test_module) + + return _test_module + + def set_fixtures(self, fixtures): + for name, fixture in fixtures.items(): + self.add_func_to_test_module(name, fixture) -class _BaseContext(object): - def __init__(self, helper, testcase, mocker, capfd): - self.helper = helper - self.testcase = testcase - self.mocker = mocker - self.capfd = capfd +class Runner: + def __init__(self, module_main): + self.module_main = module_main + self.results = None - def __enter__(self): - return self + def run(self, mocker, capfd, test_case): + test_case.setup(mocker) + self.pytest_module(capfd, test_case.flags) + test_case.check(self.results) - def __exit__(self, exc_type, exc_val, exc_tb): - return False + def pytest_module(self, capfd, flags): + if flags.get("skip"): + pytest.skip(flags.get("skip")) + if flags.get("xfail"): + pytest.xfail(flags.get("xfail")) - def _run(self): with pytest.raises(SystemExit): - self.helper.module_main() + (self.module_main)() - out, err = self.capfd.readouterr() - results = json.loads(out) + out, err = capfd.readouterr() + self.results = json.loads(out) - self.check_results(results) - def test_flags(self, flag=None): - flags = self.testcase.flags - if flag: - flags = flags.get(flag) - return flags +class ModuleTestCase: + def __init__(self, id, input, output, mocks, flags): + self.id = id + self.input = input + self.output = output + self._mocks = mocks + self.mocks = {} + self.flags = flags - def run(self): - func = self._run + self._fixtures = {} - test_flags = self.test_flags() - if test_flags.get("skip"): - pytest.skip(test_flags.get("skip")) - if test_flags.get("xfail"): - pytest.xfail(test_flags.get("xfail")) + def __str__(self): + return "".format( + id=self.id, + input="input " if self.input else "", + output="output " if self.output else "", + mocks="({0})".format(", ".join(self.mocks.keys())), + flags=self.flags + ) - func() + def __repr__(self): + return 
"ModuleTestCase(id={id}, input={input}, output={output}, mocks={mocks}, flags={flags})".format( + id=self.id, + input=self.input, + output=self.output, + mocks=repr(self.mocks), + flags=self.flags + ) - def check_results(self, results): - print("testcase =\n%s" % str(self.testcase)) + @staticmethod + def make_test_case(test_case, test_module): + tc = ModuleTestCase( + id=test_case["id"], + input=test_case.get("input", {}), + output=test_case.get("output", {}), + mocks=test_case.get("mocks", {}), + flags=test_case.get("flags", {}) + ) + tc.build_mocks(test_module) + return tc + + def build_mocks(self, test_module): + for mock, mock_spec in self._mocks.items(): + mock_class = self.get_mock_class(test_module, mock) + self.mocks[mock] = mock_class.build_mock(mock_spec) + + self._fixtures.update(self.mocks[mock].fixtures()) + + @staticmethod + def get_mock_class(test_module, mock): + try: + class_name = "".join(x.capitalize() for x in mock.split("_")) + "Mock" + plugin_class = getattr(test_module, class_name) + assert issubclass(plugin_class, TestCaseMock), "Class {0} is not a subclass of TestCaseMock".format(class_name) + return plugin_class + except AttributeError: + raise ValueError("Cannot find class {0} for mock {1}".format(class_name, mock)) + + @property + def fixtures(self): + return dict(self._fixtures) + + def setup(self, mocker): + self.setup_testcase(mocker) + self.setup_mocks(mocker) + + def check(self, results): + self.check_testcase(results) + self.check_mocks(self, results) + + def setup_testcase(self, mocker): + pass + + def setup_mocks(self, mocker): + for mock in self.mocks.values(): + mock.setup(mocker) + + def check_testcase(self, results): + print("testcase =\n%s" % repr(self)) print("results =\n%s" % results) if 'exception' in results: print("exception = \n%s" % results["exception"]) - for test_result in self.testcase.output: - assert results[test_result] == self.testcase.output[test_result], \ - "'{0}': '{1}' != '{2}'".format(test_result, 
results[test_result], self.testcase.output[test_result]) + for test_result in self.output: + assert results[test_result] == self.output[test_result], \ + "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], self.output[test_result]) + + def check_mocks(self, test_case, results): + for mock in self.mocks.values(): + mock.check(test_case, results) -class _RunCmdContext(_BaseContext): - def __init__(self, *args, **kwargs): - super(_RunCmdContext, self).__init__(*args, **kwargs) - self.run_cmd_calls = self.testcase.run_command_calls - self.mock_run_cmd = self._make_mock_run_cmd() +class TestCaseMock: + @classmethod + def build_mock(cls, mock_specs): + return cls(mock_specs) - def _make_mock_run_cmd(self): + def __init__(self, mock_specs): + self.mock_specs = mock_specs + + def fixtures(self): + return {} + + def setup(self, mocker): + pass + + def check(self, test_case, results): + raise NotImplementedError() + + +class RunCommandMock(TestCaseMock): + def __str__(self): + return "".format(specs=self.mock_specs) + + def __repr__(self): + return "RunCommandMock({specs})".format(specs=self.mock_specs) + + def fixtures(self): + @pytest.fixture + def patch_bin(mocker): + def mockie(self, path, *args, **kwargs): + return "/testbin/{0}".format(path) + mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', mockie) + + return {"patch_bin": patch_bin} + + def setup(self, mocker): def _results(): - for result in [(x.rc, x.out, x.err) for x in self.run_cmd_calls]: + for result in [(x['rc'], x['out'], x['err']) for x in self.mock_specs]: yield result raise Exception("testcase has not enough run_command calls") @@ -88,102 +234,14 @@ class _RunCmdContext(_BaseContext): raise Exception("rc = {0}".format(result[0])) return result - mock_run_command = self.mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', - side_effect=side_effect) - return mock_run_command + self.mock_run_cmd = 
mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', side_effect=side_effect) - def check_results(self, results): - super(_RunCmdContext, self).check_results(results) + def check(self, test_case, results): call_args_list = [(item[0][0], item[1]) for item in self.mock_run_cmd.call_args_list] - expected_call_args_list = [(item.command, item.environ) for item in self.run_cmd_calls] + expected_call_args_list = [(item['command'], item['environ']) for item in self.mock_specs] print("call args list =\n%s" % call_args_list) print("expected args list =\n%s" % expected_call_args_list) - assert self.mock_run_cmd.call_count == len(self.run_cmd_calls), "{0} != {1}".format(self.mock_run_cmd.call_count, len(self.run_cmd_calls)) + assert self.mock_run_cmd.call_count == len(self.mock_specs), "{0} != {1}".format(self.mock_run_cmd.call_count, len(self.mock_specs)) if self.mock_run_cmd.call_count: assert call_args_list == expected_call_args_list - - -class Helper(object): - @staticmethod - def from_list(module_main, list_): - helper = Helper(module_main, test_cases=list_) - return helper - - @staticmethod - def from_file(module_main, filename): - with open(filename, "r") as test_cases: - helper = Helper(module_main, test_cases=test_cases) - return helper - - @staticmethod - def from_module(module, test_module_name): - basename = module.__name__.split(".")[-1] - test_spec = "tests/unit/plugins/modules/test_{0}.yaml".format(basename) - helper = Helper.from_file(module.main, test_spec) - - setattr(sys.modules[test_module_name], "patch_bin", helper.cmd_fixture) - setattr(sys.modules[test_module_name], "test_module", helper.test_module) - - def __init__(self, module_main, test_cases): - self.module_main = module_main - self._test_cases = test_cases - if isinstance(test_cases, (list, tuple)): - self.testcases = test_cases - else: - self.testcases = self._make_test_cases() - - @property - def cmd_fixture(self): - @pytest.fixture - def patch_bin(mocker): - def mockie(self, 
path, *args, **kwargs): - return "/testbin/{0}".format(path) - mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', mockie) - - return patch_bin - - def _make_test_cases(self): - test_cases = yaml.safe_load(self._test_cases) - - results = [] - for tc in test_cases: - for tc_param in ["input", "output", "flags"]: - if not tc.get(tc_param): - tc[tc_param] = {} - if tc.get("run_command_calls"): - tc["run_command_calls"] = [RunCmdCall(**r) for r in tc["run_command_calls"]] - else: - tc["run_command_calls"] = [] - results.append(ModuleTestCase(**tc)) - - return results - - @property - def testcases_params(self): - return [[x.input, x] for x in self.testcases] - - @property - def testcases_ids(self): - return [item.id for item in self.testcases] - - def __call__(self, *args, **kwargs): - return _RunCmdContext(self, *args, **kwargs) - - @property - def test_module(self): - helper = self - - @pytest.mark.parametrize('patch_ansible_module, testcase', - helper.testcases_params, ids=helper.testcases_ids, - indirect=['patch_ansible_module']) - @pytest.mark.usefixtures('patch_ansible_module') - def _test_module(mocker, capfd, patch_bin, testcase): - """ - Run unit tests for test cases listed in TEST_CASES - """ - - with helper(testcase, mocker, capfd) as testcase_context: - testcase_context.run() - - return _test_module diff --git a/tests/unit/plugins/modules/test_cpanm.py b/tests/unit/plugins/modules/test_cpanm.py index 4eecf000fd..28090455f0 100644 --- a/tests/unit/plugins/modules/test_cpanm.py +++ b/tests/unit/plugins/modules/test_cpanm.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import cpanm -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(cpanm, __name__) diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index 4eed957206..ad081254d6 100644 --- 
a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -10,7 +10,8 @@ mode: compatibility output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/perl, -le, 'use Dancer;'] environ: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 2 @@ -27,7 +28,8 @@ mode: compatibility output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/perl, -le, 'use Dancer;'] environ: *env-def-false rc: 0 @@ -38,7 +40,8 @@ name: Dancer output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer] environ: *env-def-true rc: 0 @@ -50,7 +53,8 @@ mode: compatibility output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -61,7 +65,8 @@ name: MIYAGAWA/Plack-0.99_05.tar.gz output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -74,7 +79,8 @@ locallib: /srv/webapps/my_app/extlib output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -86,7 +92,8 @@ mode: new output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, /srv/webapps/my_app/src/] environ: *env-def-true rc: 0 @@ -100,7 +107,8 @@ locallib: /srv/webapps/my_app/extlib output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, --notest, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -113,7 +121,8 @@ mirror: "http://cpan.cpantesters.org/" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, --mirror, "http://cpan.cpantesters.org/", Dancer] environ: *env-def-true rc: 0 @@ -126,7 +135,8 @@ system_lib: true 
output: failed: true - run_command_calls: [] + mocks: + run_command: [] - id: install_minversion_implicit input: name: Dancer @@ -134,7 +144,8 @@ version: "1.0" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer~1.0] environ: *env-def-true rc: 0 @@ -147,7 +158,8 @@ version: "~1.5" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer~1.5] environ: *env-def-true rc: 0 @@ -160,7 +172,8 @@ version: "@1.7" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer@1.7] environ: *env-def-true rc: 0 @@ -174,7 +187,8 @@ output: failed: true msg: parameter 'version' must not be used when installing from a file - run_command_calls: [] + mocks: + run_command: [] - id: install_specific_version_from_directory_error input: from_path: ~/ @@ -183,7 +197,8 @@ output: failed: true msg: parameter 'version' must not be used when installing from a directory - run_command_calls: [] + mocks: + run_command: [] - id: install_specific_version_from_git_url_explicit input: name: "git://github.com/plack/Plack.git" @@ -191,7 +206,8 @@ version: "@1.7" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@1.7"] environ: *env-def-true rc: 0 @@ -204,7 +220,8 @@ version: "2.5" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@2.5"] environ: *env-def-true rc: 0 @@ -218,4 +235,5 @@ output: failed: true msg: operator '~' not allowed in version parameter when installing from git repository - run_command_calls: [] + mocks: + run_command: [] diff --git a/tests/unit/plugins/modules/test_django_check.py b/tests/unit/plugins/modules/test_django_check.py index 8aec71900b..52210bdb76 100644 --- a/tests/unit/plugins/modules/test_django_check.py +++ b/tests/unit/plugins/modules/test_django_check.py @@ -7,7 +7,7 @@ 
__metaclass__ = type from ansible_collections.community.general.plugins.modules import django_check -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(django_check, __name__) diff --git a/tests/unit/plugins/modules/test_django_check.yaml b/tests/unit/plugins/modules/test_django_check.yaml index 6156aaa2c2..91a8ff1953 100644 --- a/tests/unit/plugins/modules/test_django_check.yaml +++ b/tests/unit/plugins/modules/test_django_check.yaml @@ -7,7 +7,8 @@ - id: success input: settings: whatever.settings - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -17,9 +18,10 @@ input: settings: whatever.settings database: - - abc - - def - run_command_calls: + - abc + - def + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_django_command.py b/tests/unit/plugins/modules/test_django_command.py index ffa9feb394..8be910fd27 100644 --- a/tests/unit/plugins/modules/test_django_command.py +++ b/tests/unit/plugins/modules/test_django_command.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_command -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(django_command, __name__) diff --git a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml index 046dd87f03..2a19351083 100644 --- a/tests/unit/plugins/modules/test_django_command.yaml +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -8,12 +8,13 @@ input: command: check extra_args: - - babaloo - - yaba - - daba - - doo + - babaloo + - yaba + - 
daba + - doo settings: whatever.settings - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -23,14 +24,15 @@ input: command: check extra_args: - - babaloo - - yaba - - daba - - doo + - babaloo + - yaba + - daba + - doo settings: whatever.settings output: failed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: *env-def rc: 1 diff --git a/tests/unit/plugins/modules/test_django_createcachetable.py b/tests/unit/plugins/modules/test_django_createcachetable.py index 5a4b89c0c1..74bdf1cc63 100644 --- a/tests/unit/plugins/modules/test_django_createcachetable.py +++ b/tests/unit/plugins/modules/test_django_createcachetable.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_createcachetable -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(django_createcachetable, __name__) diff --git a/tests/unit/plugins/modules/test_django_createcachetable.yaml b/tests/unit/plugins/modules/test_django_createcachetable.yaml index 1808b163fb..22b7dcb304 100644 --- a/tests/unit/plugins/modules/test_django_createcachetable.yaml +++ b/tests/unit/plugins/modules/test_django_createcachetable.yaml @@ -7,9 +7,10 @@ - id: command_success input: settings: whatever.settings - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, createcachetable, --no-color, --settings=whatever.settings, --noinput, --database=default] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + environ: {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 out: "whatever\n" err: "" diff --git 
a/tests/unit/plugins/modules/test_facter_facts.py b/tests/unit/plugins/modules/test_facter_facts.py index 227d8cd150..bb74216b88 100644 --- a/tests/unit/plugins/modules/test_facter_facts.py +++ b/tests/unit/plugins/modules/test_facter_facts.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import facter_facts -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(facter_facts, __name__) diff --git a/tests/unit/plugins/modules/test_facter_facts.yaml b/tests/unit/plugins/modules/test_facter_facts.yaml index c287fdcfda..e53f7fe60f 100644 --- a/tests/unit/plugins/modules/test_facter_facts.yaml +++ b/tests/unit/plugins/modules/test_facter_facts.yaml @@ -11,7 +11,8 @@ a: 1 b: 2 c: 3 - run_command_calls: + mocks: + run_command: - command: [/testbin/facter, --json] environ: &env-def {check_rc: true} rc: 0 @@ -21,17 +22,18 @@ - id: with args input: arguments: - - -p - - system_uptime - - timezone - - is_virtual + - -p + - system_uptime + - timezone + - is_virtual output: ansible_facts: facter: a: 1 b: 2 c: 3 - run_command_calls: + mocks: + run_command: - command: [/testbin/facter, --json, -p, system_uptime, timezone, is_virtual] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gconftool2.py b/tests/unit/plugins/modules/test_gconftool2.py index 9608016e58..2ba2e1c70e 100644 --- a/tests/unit/plugins/modules/test_gconftool2.py +++ b/tests/unit/plugins/modules/test_gconftool2.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2 -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gconftool2, __name__) diff --git a/tests/unit/plugins/modules/test_gconftool2.yaml b/tests/unit/plugins/modules/test_gconftool2.yaml index 5114dc45fd..084741e6d1 100644 --- a/tests/unit/plugins/modules/test_gconftool2.yaml +++ 
b/tests/unit/plugins/modules/test_gconftool2.yaml @@ -13,7 +13,8 @@ output: new_value: '200' changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -38,7 +39,8 @@ output: new_value: '200' changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -63,7 +65,8 @@ output: new_value: 'false' changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 @@ -84,9 +87,10 @@ state: absent key: /desktop/gnome/background/picture_filename output: - new_value: null + new_value: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -102,9 +106,10 @@ state: absent key: /apps/gnome_settings_daemon/screensaver/start_screensaver output: - new_value: null + new_value: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gconftool2_info.py b/tests/unit/plugins/modules/test_gconftool2_info.py index 54676a12d2..4daa655714 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.py +++ b/tests/unit/plugins/modules/test_gconftool2_info.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2_info -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gconftool2_info, __name__) diff --git a/tests/unit/plugins/modules/test_gconftool2_info.yaml 
b/tests/unit/plugins/modules/test_gconftool2_info.yaml index eb8bef750d..26db16a368 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.yaml +++ b/tests/unit/plugins/modules/test_gconftool2_info.yaml @@ -9,7 +9,8 @@ key: /desktop/gnome/background/picture_filename output: value: '100' - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -19,8 +20,9 @@ input: key: /desktop/gnome/background/picture_filename output: - value: null - run_command_calls: + value: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gio_mime.py b/tests/unit/plugins/modules/test_gio_mime.py index f2402ac352..5e51320485 100644 --- a/tests/unit/plugins/modules/test_gio_mime.py +++ b/tests/unit/plugins/modules/test_gio_mime.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gio_mime -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gio_mime, __name__) diff --git a/tests/unit/plugins/modules/test_gio_mime.yaml b/tests/unit/plugins/modules/test_gio_mime.yaml index d9e47a60ea..75e5554c7c 100644 --- a/tests/unit/plugins/modules/test_gio_mime.yaml +++ b/tests/unit/plugins/modules/test_gio_mime.yaml @@ -11,7 +11,8 @@ output: handler: google-chrome.desktop changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -32,7 +33,8 @@ changed: true flags: skip: test helper does not support check mode yet - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 @@ -51,7 +53,8 @@ output: 
handler: google-chrome.desktop changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_opkg.py b/tests/unit/plugins/modules/test_opkg.py index c42025959e..cfee3e1115 100644 --- a/tests/unit/plugins/modules/test_opkg.py +++ b/tests/unit/plugins/modules/test_opkg.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import opkg -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(opkg, __name__) diff --git a/tests/unit/plugins/modules/test_opkg.yaml b/tests/unit/plugins/modules/test_opkg.yaml index 6e227dea27..0cef54ac08 100644 --- a/tests/unit/plugins/modules/test_opkg.yaml +++ b/tests/unit/plugins/modules/test_opkg.yaml @@ -10,7 +10,8 @@ state: present output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 @@ -39,7 +40,8 @@ state: present output: msg: package(s) already present - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -53,7 +55,8 @@ force: reinstall output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -80,7 +83,8 @@ state: present output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -109,7 +113,8 @@ update_cache: true output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, update] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_puppet.py b/tests/unit/plugins/modules/test_puppet.py index 
57f88ada1c..efdb042a5a 100644 --- a/tests/unit/plugins/modules/test_puppet.py +++ b/tests/unit/plugins/modules/test_puppet.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import puppet -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(puppet, __name__) diff --git a/tests/unit/plugins/modules/test_puppet.yaml b/tests/unit/plugins/modules/test_puppet.yaml index 7909403cfb..668571273c 100644 --- a/tests/unit/plugins/modules/test_puppet.yaml +++ b/tests/unit/plugins/modules/test_puppet.yaml @@ -8,27 +8,28 @@ input: {} output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" environ: *env-def rc: 0 out: "" @@ -38,28 +39,29 @@ certname: potatobox output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --certname=potatobox + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - 
--detailed-exitcodes + - --verbose + - --color + - "0" + - --certname=potatobox environ: *env-def rc: 0 out: "" @@ -69,29 +71,30 @@ tags: [a, b, c] output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --tags - - a,b,c + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --tags + - a,b,c environ: *env-def rc: 0 out: "" @@ -101,29 +104,30 @@ skip_tags: [d, e, f] output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --skip_tags - - d,e,f + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --skip_tags + - d,e,f environ: *env-def rc: 0 out: "" @@ -133,28 +137,29 @@ noop: false output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - 
--verbose - - --color - - "0" - - --no-noop + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --no-noop environ: *env-def rc: 0 out: "" @@ -164,28 +169,29 @@ noop: true output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --noop + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --noop environ: *env-def rc: 0 out: "" @@ -195,29 +201,30 @@ waitforlock: 30 output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --waitforlock - - "30" + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --waitforlock + - "30" environ: *env-def rc: 0 out: "" diff --git a/tests/unit/plugins/modules/test_snap.py b/tests/unit/plugins/modules/test_snap.py index 480f637b6d..d70094551a 100644 --- a/tests/unit/plugins/modules/test_snap.py +++ b/tests/unit/plugins/modules/test_snap.py @@ -6,8 +6,10 @@ from 
__future__ import (absolute_import, division, print_function) __metaclass__ = type -from .helper import Helper, ModuleTestCase, RunCmdCall +import sys + from ansible_collections.community.general.plugins.modules import snap +from .helper import Helper, RunCommandMock # pylint: disable=unused-import issue_6803_status_out = """Name Version Rev Tracking Publisher Notes @@ -375,100 +377,102 @@ issue_6803_kubectl_out = ( ) TEST_CASES = [ - ModuleTestCase( + dict( id="simple case", input={"name": ["hello-world"]}, output=dict(changed=True, snaps_installed=["hello-world"]), flags={}, - run_command_calls=[ - RunCmdCall( - command=['/testbin/snap', 'info', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: hello-world\n', - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="", - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="hello-world (12345/stable) v12345 from Canonical** installed\n", - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher Notes" - "core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ] + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'info', 'hello-world'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out='name: hello-world\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 
'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out="", + err="", + ), + dict( + command=['/testbin/snap', 'install', 'hello-world'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out="hello-world (12345/stable) v12345 from Canonical** installed\n", + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), ), - ModuleTestCase( + dict( id="issue_6803", input={"name": ["microk8s", "kubectl"], "classic": True}, output=dict(changed=True, snaps_installed=["microk8s", "kubectl"]), flags={}, - run_command_calls=[ - RunCmdCall( - command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: microk8s\n---\nname: kubectl\n', - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_status_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', '--classic', 'microk8s'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_microk8s_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', '--classic', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_kubectl_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher 
Notes" - "core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" - "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ] + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out='name: microk8s\n---\nname: kubectl\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_status_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'microk8s'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_microk8s_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'kubectl'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_kubectl_out, + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" + "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), ), ] -helper = Helper.from_list(snap.main, TEST_CASES) -patch_bin = helper.cmd_fixture -test_module = helper.test_module +Helper.from_list(sys.modules[__name__], snap, TEST_CASES) diff --git a/tests/unit/plugins/modules/test_xfconf.py b/tests/unit/plugins/modules/test_xfconf.py index fbc2dae5f2..f902797ee3 100644 --- 
a/tests/unit/plugins/modules/test_xfconf.py +++ b/tests/unit/plugins/modules/test_xfconf.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(xfconf, __name__) diff --git a/tests/unit/plugins/modules/test_xfconf.yaml b/tests/unit/plugins/modules/test_xfconf.yaml index 908154df26..481b090e94 100644 --- a/tests/unit/plugins/modules/test_xfconf.yaml +++ b/tests/unit/plugins/modules/test_xfconf.yaml @@ -21,7 +21,8 @@ previous_value: '100' type: int value: '90' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 @@ -44,7 +45,8 @@ previous_value: '90' type: int value: '90' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: *env-def rc: 0 @@ -61,13 +63,14 @@ property: /general/SaveOnExit state: present value_type: bool - value: False + value: false output: changed: true previous_value: 'true' type: bool value: 'False' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit] environ: *env-def rc: 0 @@ -90,32 +93,33 @@ previous_value: [Main, Work, Tmp] type: [string, string, string] value: [A, B, C] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" err: "" - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string 
- - --set - - C + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C environ: *env-def rc: 0 out: "" @@ -132,32 +136,33 @@ previous_value: [A, B, C] type: [string, string, string] value: [A, B, C] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 out: "Value is an array with 3 items:\n\nA\nB\nC\n" err: "" - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string - - --set - - C + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C environ: *env-def rc: 0 out: "" @@ -170,9 +175,10 @@ output: changed: true previous_value: [A, B, C] - type: null - value: null - run_command_calls: + type: + value: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_xfconf_info.py b/tests/unit/plugins/modules/test_xfconf_info.py index 67c63dda09..308f075490 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.py +++ b/tests/unit/plugins/modules/test_xfconf_info.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf_info -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(xfconf_info, __name__) diff --git a/tests/unit/plugins/modules/test_xfconf_info.yaml 
b/tests/unit/plugins/modules/test_xfconf_info.yaml index 519a87fdbd..26f77ce474 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.yaml +++ b/tests/unit/plugins/modules/test_xfconf_info.yaml @@ -11,7 +11,8 @@ output: value: '100' is_array: false - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -22,7 +23,8 @@ channel: xfwm4 property: /general/i_dont_exist output: {} - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/i_dont_exist] environ: *env-def rc: 1 @@ -34,7 +36,8 @@ output: failed: true msg: "missing parameter(s) required by 'property': channel" - run_command_calls: [] + mocks: + run_command: [] - id: test_property_get_array input: channel: xfwm4 @@ -42,7 +45,8 @@ output: is_array: true value_array: [Main, Work, Tmp] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 @@ -52,7 +56,8 @@ input: {} output: channels: [a, b, c] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --list] environ: *env-def rc: 0 @@ -63,13 +68,14 @@ channel: xfwm4 output: properties: - - /general/wrap_cycle - - /general/wrap_layout - - /general/wrap_resistance - - /general/wrap_windows - - /general/wrap_workspaces - - /general/zoom_desktop - run_command_calls: + - /general/wrap_cycle + - /general/wrap_layout + - /general/wrap_resistance + - /general/wrap_windows + - /general/wrap_workspaces + - /general/zoom_desktop + mocks: + run_command: - command: [/testbin/xfconf-query, --list, --channel, xfwm4] environ: *env-def rc: 0 From ab84f1632f26ef7a7a3243b3f61871e7e26ef280 Mon Sep 17 00:00:00 2001 From: Julien Lecomte Date: Sun, 29 Sep 2024 14:03:07 +0200 Subject: [PATCH 256/482] Sort parameters in gitlab_group to 
aid in adding more params (#8899) --- plugins/modules/gitlab_group.py | 170 ++++++++++++++++---------------- 1 file changed, 85 insertions(+), 85 deletions(-) diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 04a8f6c81b..74925430a1 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -33,66 +33,21 @@ attributes: support: none options: - name: - description: - - Name of the group you want to create. - required: true - type: str - path: - description: - - The path of the group you want to create, this will be api_url/group_path - - If not supplied, the group_name will be used. - type: str - description: - description: - - A description for the group. - type: str - state: - description: - - create or delete group. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - parent: - description: - - Allow to create subgroups - - Id or Full path of parent group in the form of group/name - type: str - visibility: - description: - - Default visibility of the group - choices: ["private", "internal", "public"] - default: private - type: str - project_creation_level: - description: - - Determine if developers can create projects in the group. - choices: ["developer", "maintainer", "noone"] - type: str - version_added: 3.7.0 auto_devops_enabled: description: - Default to Auto DevOps pipeline for all projects within this group. type: bool version_added: 3.7.0 - subgroup_creation_level: - description: - - Allowed to create subgroups. - choices: ["maintainer", "owner"] - type: str - version_added: 3.7.0 - require_two_factor_authentication: - description: - - Require all users in this group to setup two-factor authentication. - type: bool - version_added: 3.7.0 avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. 
type: path version_added: 4.2.0 + description: + description: + - A description for the group. + type: str force_delete: description: - Force delete group even if projects in it. @@ -100,6 +55,51 @@ options: type: bool default: false version_added: 7.5.0 + name: + description: + - Name of the group you want to create. + required: true + type: str + parent: + description: + - Allow to create subgroups + - Id or Full path of parent group in the form of group/name + type: str + path: + description: + - The path of the group you want to create, this will be api_url/group_path + - If not supplied, the group_name will be used. + type: str + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. + type: bool + version_added: 3.7.0 + state: + description: + - create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + subgroup_creation_level: + description: + - Allowed to create subgroups. 
+ choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + visibility: + description: + - Default visibility of the group + choices: ["private", "internal", "public"] + default: private + type: str ''' EXAMPLES = ''' @@ -207,13 +207,13 @@ class GitLabGroup(object): parent_id = self.get_group_id(parent) payload = { - 'name': name, - 'path': options['path'], - 'parent_id': parent_id, - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], 'auto_devops_enabled': options['auto_devops_enabled'], + 'name': name, + 'parent_id': parent_id, + 'path': options['path'], + 'project_creation_level': options['project_creation_level'], 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], } if options.get('description'): payload['description'] = options['description'] @@ -230,13 +230,13 @@ class GitLabGroup(object): changed = True else: changed, group = self.update_group(self.group_object, { - 'name': name, - 'description': options['description'], - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], + 'description': options['description'], + 'name': name, + 'project_creation_level': options['project_creation_level'], 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], }) self.group_object = group @@ -322,28 +322,28 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - parent=dict(type='str'), - visibility=dict(type='str', 
default="private", choices=["internal", "private", "public"]), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), auto_devops_enabled=dict(type='bool'), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - require_two_factor_authentication=dict(type='bool'), avatar_path=dict(type='path'), + description=dict(type='str'), force_delete=dict(type='bool', default=False), + name=dict(type='str', required=True), + parent=dict(type='str'), + path=dict(type='str'), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + require_two_factor_authentication=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), )) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_token'], ], required_together=[ ['api_username', 'api_password'], @@ -357,18 +357,18 @@ def main(): # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) + auto_devops_enabled = module.params['auto_devops_enabled'] + avatar_path = module.params['avatar_path'] + description = module.params['description'] + force_delete = module.params['force_delete'] group_name = module.params['name'] group_path = module.params['path'] - description = module.params['description'] - state = module.params['state'] - parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + parent_identifier = module.params['parent'] 
project_creation_level = module.params['project_creation_level'] - auto_devops_enabled = module.params['auto_devops_enabled'] - subgroup_creation_level = module.params['subgroup_creation_level'] require_two_factor_authentication = module.params['require_two_factor_authentication'] - avatar_path = module.params['avatar_path'] - force_delete = module.params['force_delete'] + state = module.params['state'] + subgroup_creation_level = module.params['subgroup_creation_level'] # Define default group_path based on group_name if group_path is None: @@ -395,14 +395,14 @@ def main(): if state == 'present': if gitlab_group.create_or_update_group(group_name, parent_group, { - "path": group_path, - "description": description, - "visibility": group_visibility, - "project_creation_level": project_creation_level, "auto_devops_enabled": auto_devops_enabled, - "subgroup_creation_level": subgroup_creation_level, - "require_two_factor_authentication": require_two_factor_authentication, "avatar_path": avatar_path, + "description": description, + "path": group_path, + "project_creation_level": project_creation_level, + "require_two_factor_authentication": require_two_factor_authentication, + "subgroup_creation_level": subgroup_creation_level, + "visibility": group_visibility, }): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) else: From a7d1b0fc52a48109be866fa017e330b17e388a0a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 30 Sep 2024 19:45:32 +1300 Subject: [PATCH 257/482] python_runner/django_command: bugfixes (#8944) * python_runner/django_command: bugfixes * fix indentation * join path_prefix with : when concatenating with PATH * add changelog frag --- .../fragments/8944-django-command-fix.yml | 3 + plugins/module_utils/python_runner.py | 6 +- plugins/modules/django_command.py | 5 + .../targets/django_command/aliases | 21 +++ 
.../single_app_project/core/settings.py | 6 + .../single_app_project/manage.py | 21 +++ .../base_test/simple_project/p1/manage.py | 29 ++++ .../simple_project/p1/p1/settings.py | 133 ++++++++++++++++++ .../base_test/simple_project/p1/p1/urls.py | 28 ++++ .../files/base_test/startproj/.keep | 0 .../targets/django_command/meta/main.yml | 8 ++ .../targets/django_command/tasks/main.yaml | 91 ++++++++++++ 12 files changed, 349 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8944-django-command-fix.yml create mode 100644 tests/integration/targets/django_command/aliases create mode 100644 tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py create mode 100755 tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py create mode 100755 tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py create mode 100644 tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py create mode 100644 tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py create mode 100644 tests/integration/targets/django_command/files/base_test/startproj/.keep create mode 100644 tests/integration/targets/django_command/meta/main.yml create mode 100644 tests/integration/targets/django_command/tasks/main.yaml diff --git a/changelogs/fragments/8944-django-command-fix.yml b/changelogs/fragments/8944-django-command-fix.yml new file mode 100644 index 0000000000..755bf5628a --- /dev/null +++ b/changelogs/fragments/8944-django-command-fix.yml @@ -0,0 +1,3 @@ +bugfixes: + - python_runner module utils - parameter ``path_prefix`` was being handled as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). 
+ - django_command - option ``command`` is now split lexically before passed to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944). diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py index f678f247b4..b65867c61e 100644 --- a/plugins/module_utils/python_runner.py +++ b/plugins/module_utils/python_runner.py @@ -22,10 +22,12 @@ class PythonRunner(CmdRunner): if (os.path.isabs(python) or '/' in python): self.python = python elif self.has_venv: - path_prefix = os.path.join(venv, "bin") + if path_prefix is None: + path_prefix = [] + path_prefix.append(os.path.join(venv, "bin")) if environ_update is None: environ_update = {} - environ_update["PATH"] = "%s:%s" % (path_prefix, os.environ["PATH"]) + environ_update["PATH"] = "%s:%s" % (":".join(path_prefix), os.environ["PATH"]) environ_update["VIRTUAL_ENV"] = venv python_cmd = [self.python] + _ensure_list(command) diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py index 788f4a100e..dcb8d26313 100644 --- a/plugins/modules/django_command.py +++ b/plugins/modules/django_command.py @@ -57,6 +57,8 @@ run_info: returned: success and O(verbosity) >= 3 """ +import shlex + from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt @@ -74,6 +76,9 @@ class DjangoCommand(DjangoModuleHelper): ) django_admin_arg_order = "extra_args" + def __init_module__(self): + self.vars.command = shlex.split(self.vars.command) + def main(): DjangoCommand.execute() diff --git a/tests/integration/targets/django_command/aliases b/tests/integration/targets/django_command/aliases new file mode 100644 index 0000000000..ae3c2623a0 --- /dev/null +++ b/tests/integration/targets/django_command/aliases @@ -0,0 +1,21 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 +skip/python2 +skip/freebsd +skip/macos +skip/osx +skip/rhel8.2 +skip/rhel8.3 +skip/rhel8.4 +skip/rhel8.5 +skip/rhel8.6 +skip/rhel8.7 +skip/rhel8.8 +skip/rhel9.0 +skip/rhel9.1 +skip/rhel9.2 +skip/rhel9.3 +skip/rhel9.4 diff --git a/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py new file mode 100644 index 0000000000..881221c066 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py @@ -0,0 +1,6 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# single_app_project/core/settings.py +SECRET_KEY = 'testtesttesttesttest' diff --git a/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py new file mode 100755 index 0000000000..4b4eddcb67 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +# single_app_project/manage.py +import os +import sys + + +def main(): + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'single_app_project.core.settings') + from 
django.core.management import execute_from_command_line + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py new file mode 100755 index 0000000000..be3140f44d --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p1.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" 
+ ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py new file mode 100644 index 0000000000..86b3ae64c6 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +""" +Django settings for p1 project. + +Generated by 'django-admin startproj' using Django 3.1.5. + +For more information on this file, see +https://docs.djangoproject.com/en/3.1/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.1/ref/settings/ +""" + +import os +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = '%g@gyhl*q@@g(_ab@t^76dao^#b9-v8mw^50)x_bv6wpl+mukj' + +# SECURITY WARNING: don't run with debug turned on in production! 
+DEBUG = True + +ALLOWED_HOSTS = [] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'p1.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'p1.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.1/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.1/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + 
+# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.1/howto/static-files/ + +STATIC_URL = '/static/' +STATIC_ROOT = '/tmp/django-static' + +if "DJANGO_ANSIBLE_RAISE" in os.environ: + raise ValueError("DJANGO_ANSIBLE_RAISE={0}".format(os.environ["DJANGO_ANSIBLE_RAISE"])) diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py new file mode 100644 index 0000000000..36cb592756 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py @@ -0,0 +1,28 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +"""p1 URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/2.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.contrib import admin +from django.urls import path + +urlpatterns = [ + path('admin/', admin.site.urls), +] diff --git a/tests/integration/targets/django_command/files/base_test/startproj/.keep b/tests/integration/targets/django_command/files/base_test/startproj/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/targets/django_command/meta/main.yml b/tests/integration/targets/django_command/meta/main.yml new file mode 100644 index 0000000000..4a216308a2 --- /dev/null +++ b/tests/integration/targets/django_command/meta/main.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_pkg_mgr + - setup_os_pkg_name diff --git a/tests/integration/targets/django_command/tasks/main.yaml b/tests/integration/targets/django_command/tasks/main.yaml new file mode 100644 index 0000000000..9d052dc44f --- /dev/null +++ b/tests/integration/targets/django_command/tasks/main.yaml @@ -0,0 +1,91 @@ +# Test code for django_command module +# +# Copyright (c) 2020, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Create temporary test directory + tempfile: + state: directory + suffix: .django_command + register: tmp_django_root + +- name: Install OS package virtualenv + package: + name: "{{ os_package_name.virtualenv }}" + state: present + +- name: Ensure virtualenv is created + command: >- + virtualenv {{ tmp_django_root.path }}/venv + +- name: Update python package pip + pip: + name: pip + state: latest + virtualenv: "{{ tmp_django_root.path }}/venv" + +- name: Install python package django + pip: + name: django + state: present + virtualenv: "{{ 
tmp_django_root.path }}/venv" + +- name: Copy files + copy: + src: base_test/ + dest: "{{ tmp_django_root.path }}" + mode: preserve + +- name: Create project + command: + chdir: "{{ tmp_django_root.path }}/startproj" + cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startproject test_django_command_1" + +- name: Create app + command: + chdir: "{{ tmp_django_root.path }}/startproj" + cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startapp app1" + +- name: Check + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/startproj/test_django_command_1" + settings: test_django_command_1.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" + +- name: Check simple_project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" + +- name: Check custom project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/1045-single-app-project/single_app_project" + settings: core.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" + +- name: Run collectstatic --noinput on simple project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: collectstatic --noinput + venv: "{{ tmp_django_root.path }}/venv" + +- name: Trigger exception with environment variable + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: collectstatic --noinput + venv: "{{ tmp_django_root.path }}/venv" + environment: + DJANGO_ANSIBLE_RAISE: blah + ignore_errors: true + register: env_raise + +- name: Check env variable reached manage.py + ansible.builtin.assert: + that: + - "'ValueError: DJANGO_ANSIBLE_RAISE=blah' in env_raise.msg" From 7c913b239a6ec26bb92cfbb185499f5fb9a2c841 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 1 Oct 2024 
21:55:04 +0300 Subject: [PATCH 258/482] Pass absolute paths to atomic_move() (#8925) Pass absolute paths to atmoic_move(). --- changelogs/fragments/8925-atomic.yml | 6 ++++++ plugins/modules/ini_file.py | 2 +- plugins/modules/java_keystore.py | 2 +- plugins/modules/jenkins_plugin.py | 4 ++-- plugins/modules/kdeconfig.py | 2 +- plugins/modules/pam_limits.py | 2 +- 6 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8925-atomic.yml diff --git a/changelogs/fragments/8925-atomic.yml b/changelogs/fragments/8925-atomic.yml new file mode 100644 index 0000000000..75e48a1dba --- /dev/null +++ b/changelogs/fragments/8925-atomic.yml @@ -0,0 +1,6 @@ +bugfixes: + - "ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." + - "java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." + - "jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." + - "kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." + - "pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." 
diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index affee2a4f7..18a79ce122 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -569,7 +569,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) try: - module.atomic_move(tmpfile, target_filename) + module.atomic_move(tmpfile, os.path.abspath(target_filename)) except IOError: module.ansible.fail_json(msg='Unable to move temporary \ file %s to %s, IOError' % (tmpfile, target_filename), traceback=traceback.format_exc()) diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index 7da52cc057..0a8e3398d5 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -470,7 +470,7 @@ class JavaKeystore: if self.keystore_type == 'pkcs12': # Preserve properties of the destination file, if any. - self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.module.atomic_move(os.path.abspath(keystore_p12_path), os.path.abspath(self.keystore_path)) self.update_permissions() self.result['changed'] = True return self.result diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py index 13a804a508..8834e0a2b2 100644 --- a/plugins/modules/jenkins_plugin.py +++ b/plugins/modules/jenkins_plugin.py @@ -685,7 +685,7 @@ class JenkinsPlugin(object): # Move the updates file to the right place if we could read it if tmp_updates_file != updates_file: - self.module.atomic_move(tmp_updates_file, updates_file) + self.module.atomic_move(os.path.abspath(tmp_updates_file), os.path.abspath(updates_file)) # Check if we have the plugin data available if not data.get('plugins', {}).get(self.params['name']): @@ -718,7 +718,7 @@ class JenkinsPlugin(object): details=to_native(e)) # Move the file onto the right place - self.module.atomic_move(tmp_f, f) + 
self.module.atomic_move(os.path.abspath(tmp_f), os.path.abspath(f)) def uninstall(self): changed = False diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py index 4e8d395215..96d7df8b8d 100644 --- a/plugins/modules/kdeconfig.py +++ b/plugins/modules/kdeconfig.py @@ -214,7 +214,7 @@ def run_module(module, tmpdir, kwriteconfig): if module.params['backup'] and os.path.exists(b_path): result['backup_file'] = module.backup_local(result['path']) try: - module.atomic_move(b_tmpfile, b_path) + module.atomic_move(b_tmpfile, os.path.abspath(b_path)) except IOError: module.ansible.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc()) diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py index f97ea6602b..4ed037a6ff 100644 --- a/plugins/modules/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -339,7 +339,7 @@ def main(): pass # Move tempfile to newfile - module.atomic_move(nf.name, limits_conf) + module.atomic_move(os.path.abspath(nf.name), os.path.abspath(limits_conf)) try: nf.close() From 83080cc0054b62c0c4624e66ce4501cf14fc0b21 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Wed, 2 Oct 2024 08:48:04 +0200 Subject: [PATCH 259/482] keycloak_userprofile: fix empty response by removing `parent` filter when fetching userprofile component (#8923) * remove parent filter when fetching userprofile component * add changelog fragment * Update changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...userprofile-fix-empty-response-when-fetching-userprofile.yml | 2 ++ plugins/modules/keycloak_userprofile.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml diff --git 
a/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml b/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml new file mode 100644 index 0000000000..5b3c18ba2c --- /dev/null +++ b/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). \ No newline at end of file diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py index ba5dc127d2..55971cbf42 100644 --- a/plugins/modules/keycloak_userprofile.py +++ b/plugins/modules/keycloak_userprofile.py @@ -641,7 +641,7 @@ def main(): changeset_copy = deepcopy(changeset) # Get a list of all Keycloak components that are of userprofile provider type. - realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id) + realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type)), parent_id) # If this component is present get its userprofile ID. Confusingly the userprofile ID is # also known as the Provider ID. 
From 2d660a1252c5c40333f0378ba485680426c33cd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1redy=20Alves?= Date: Wed, 2 Oct 2024 07:48:45 +0100 Subject: [PATCH 260/482] flatpak: improve flatpak name parsing in `_parse_flatpak_name` (#8909) * flatpak: improve flatpak name parsing in `_parse_flatpak_name` * changelog: add changelog fragment * flatpak: fix condition in `_is_flatpak_id` function * chore: update changelog fragment * docs(flatpak): add guidelines for application IDs in comments --- .../8909-flatpak-improve-name-parsing.yaml | 2 ++ plugins/modules/flatpak.py | 28 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/8909-flatpak-improve-name-parsing.yaml diff --git a/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml b/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml new file mode 100644 index 0000000000..26a9379235 --- /dev/null +++ b/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml @@ -0,0 +1,2 @@ +minor_changes: + - flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909). diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index 15e404d45b..09e49e5575 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -329,13 +329,39 @@ def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method return row.split()[0] +def _is_flatpak_id(part): + # For guidelines on application IDs, refer to the following resources: + # Flatpak: + # https://docs.flatpak.org/en/latest/conventions.html#application-ids + # Flathub: + # https://docs.flathub.org/docs/for-app-authors/requirements#application-id + if '.' 
not in part: + return False + sections = part.split('.') + if len(sections) < 2: + return False + domain = sections[0] + if not domain.islower(): + return False + for section in sections[1:]: + if not section.isalnum(): + return False + return True + + def _parse_flatpak_name(name): if name.startswith('http://') or name.startswith('https://'): file_name = urlparse(name).path.split('/')[-1] file_name_without_extension = file_name.split('.')[0:-1] common_name = ".".join(file_name_without_extension) else: - common_name = name + parts = name.split('/') + for part in parts: + if _is_flatpak_id(part): + common_name = part + break + else: + common_name = name return common_name From daaa008713ccb73bdf0b2f2dd8b2759c7cd02841 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 3 Oct 2024 07:00:03 +1300 Subject: [PATCH 261/482] pipx: remove unused param from the runner ctx.run() call (#8965) remove unused param from the runner ctx.run() call --- plugins/modules/pipx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 4b94dee2ac..f9ad13980d 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -308,7 +308,7 @@ class PipX(StateModuleHelper): def state_install_all(self): self.changed = True with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: - ctx.run(name_source=[self.vars.name, self.vars.source]) + ctx.run() self._capture_results(ctx) def state_upgrade(self): From 96dfb89b0171b9b3c9af009a462327b329aa1ee6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 3 Oct 2024 07:23:14 +1300 Subject: [PATCH 262/482] cmd_runner_guide: docs improvements (#8963) * cmd_runner_guide: docs improvements * add note about suboptions --- docs/docsite/rst/guide_cmdrunner.rst | 60 +++++++++++++++++++--------- 1 file changed, 42 
insertions(+), 18 deletions(-) diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst index d4f12cf81e..d491769c18 100644 --- a/docs/docsite/rst/guide_cmdrunner.rst +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -68,20 +68,27 @@ This is meant to be done once, then every time you need to execute the command y with runner("version") as ctx: dummy, stdout, dummy = ctx.run() + # passes arg 'data' to AnsibleModule.run_command() + with runner("type name", data=stdin_data) as ctx: + dummy, stdout, dummy = ctx.run() + # Another way of expressing it dummy, stdout, dummy = runner("version").run() -Note that you can pass values for the arguments when calling ``run()``, -otherwise ``CmdRunner`` uses the module options with the exact same names to -provide values for the runner arguments. If no value is passed and no module option -is found for the name specified, then an exception is raised, unless the -argument is using ``cmd_runner_fmt.as_fixed`` as format function like the -``version`` in the example above. See more about it below. +Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner`` +uses the module options with the exact same names to provide values for the runner arguments. +If no value is passed and no module option is found for the name specified, then an exception is raised, unless +the argument is using ``cmd_runner_fmt.as_fixed`` as format function like the ``version`` in the example above. +See more about it below. In the first example, values of ``type``, ``force``, ``no_deps`` and others are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are passed explicitly. +.. note:: + + It is not possible to automatically retrieve values of suboptions. + That generates a resulting command line similar to (example taken from the output of an integration test): @@ -110,7 +117,7 @@ into something formatted for the command line. 
Argument format function """""""""""""""""""""""" -An ``arg_format`` function should be of the form: +An ``arg_format`` function is defined in the form similar to: .. code-block:: python @@ -155,7 +162,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_list()`` - - Example: + - Examples: +----------------------+---------------------+ | Value | Outcome | +======================+=====================+ @@ -167,12 +174,11 @@ In these descriptions ``value`` refers to the single parameter passed to the for - ``cmd_runner_fmt.as_bool()`` This method receives two different parameters: ``args_true`` and ``args_false``, latter being optional. If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. - If the boolean evaluation is ``False``, then the function returns ``args_false`` - if it was provided, or ``[]`` otherwise. + If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise. 
- - Creation: + - Creation (one arg): ``cmd_runner_fmt.as_bool("--force")`` - - Example: + - Examples: +------------+--------------------+ | Value | Outcome | +============+====================+ @@ -180,6 +186,18 @@ In these descriptions ``value`` refers to the single parameter passed to the for +------------+--------------------+ | ``False`` | ``[]`` | +------------+--------------------+ + - Creation (two args): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``[]`` | + +------------+----------------------+ - ``cmd_runner_fmt.as_bool_not()`` This method receives one parameter, which is returned by the function when the boolean evaluation @@ -187,7 +205,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_bool_not("--no-deps")`` - - Example: + - Examples: +-------------+---------------------+ | Value | Outcome | +=============+=====================+ @@ -202,7 +220,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_optval("-i")`` - - Example: + - Examples: +---------------+---------------------+ | Value | Outcome | +===============+=====================+ @@ -216,7 +234,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_opt_val("--name")`` - - Example: + - Examples: +--------------+--------------------------+ | Value | Outcome | +==============+==========================+ @@ -229,7 +247,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` - - Example: + - Examples: +------------+-------------------------+ 
| Value | Outcome | +============+=========================+ @@ -243,7 +261,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_fixed("--version")`` - - Example: + - Examples: +---------+-----------------------+ | Value | Outcome | +=========+=======================+ @@ -265,7 +283,7 @@ In these descriptions ``value`` refers to the single parameter passed to the for - Creation: ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)`` - - Example: + - Examples: +---------------------+---------------+ | Value | Outcome | +=====================+===============+ @@ -359,6 +377,8 @@ Settings that can be passed to the ``CmdRunner`` constructor are: Command to be executed. It can be a single string, the executable name, or a list of strings containing the executable name as the first element and, optionally, fixed parameters. Those parameters are used in all executions of the runner. + The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is + processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``. - ``arg_formats: dict`` Mapping of argument names to formatting functions. - ``default_args_order: str`` @@ -394,6 +414,10 @@ When creating a context, the additional settings that can be passed to the call Defaults to ``False``. - ``check_mode_return: any`` If ``check_mode_skip=True``, then return this value instead. +- valid named arguments to ``AnsibleModule.run_command()`` + Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context. + For example, ``data`` can be used to send information to the command's standard input. + Or ``cwd`` can be used to run the command inside a specific working directory. 
Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior might occur if redefining options already present in the runner or its context creation. Use with caution. From c4e2b731939e56a24870e9e3455381dc33ea3040 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 3 Oct 2024 07:24:24 +1300 Subject: [PATCH 263/482] cmd_runner_fmt: refactor out to its own file (#8964) * cmd_runner_fmt: refactor out to its own file * add new file to BOTMETA.yml * add changelog frag --- .github/BOTMETA.yml | 2 + .../8964-cmd-runner-argformat-refactor.yml | 2 + plugins/module_utils/cmd_runner.py | 115 +--------------- plugins/module_utils/cmd_runner_fmt.py | 123 ++++++++++++++++++ 4 files changed, 130 insertions(+), 112 deletions(-) create mode 100644 changelogs/fragments/8964-cmd-runner-argformat-refactor.yml create mode 100644 plugins/module_utils/cmd_runner_fmt.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c9326fa75a..10f2aee95b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -310,6 +310,8 @@ files: labels: module_utils $module_utils/btrfs.py: maintainers: gnfzdz + $module_utils/cmd_runner_fmt.py: + maintainers: russoz $module_utils/cmd_runner.py: maintainers: russoz $module_utils/deps.py: diff --git a/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml b/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml new file mode 100644 index 0000000000..be8adf25e3 --- /dev/null +++ b/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd_runner module utils - refactor argument formatting code to its own Python module (https://github.com/ansible-collections/community.general/pull/8964). 
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 5cd4f6b957..10278964bb 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -7,10 +7,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import os -from functools import wraps from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.common.locale import get_best_parsable_locale +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt def _ensure_list(value): @@ -88,112 +88,6 @@ class FormatError(CmdRunnerException): ) -class _ArgFormat(object): - # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0 - def __init__(self, func, ignore_none=None, ignore_missing_value=False): - self.func = func - self.ignore_none = ignore_none - self.ignore_missing_value = ignore_missing_value - - # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0 - def __call__(self, value, ctx_ignore_none=True): - # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0 - ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none - if value is None and ignore_none: - return [] - f = self.func - return [str(x) for x in f(value)] - - def __str__(self): - return "".format( - self.func, - self.ignore_none, - self.ignore_missing_value, - ) - - def __repr__(self): - return str(self) - - -class _Format(object): - @staticmethod - def as_bool(args_true, args_false=None, ignore_none=None): - if args_false is not None: - if ignore_none is None: - ignore_none = False - else: - args_false = [] - return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none) - - @staticmethod - def as_bool_not(args): - return _Format.as_bool([], args, ignore_none=False) - - @staticmethod - def as_optval(arg, ignore_none=None): - return 
_ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_opt_val(arg, ignore_none=None): - return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none) - - @staticmethod - def as_opt_eq_val(arg, ignore_none=None): - return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_list(ignore_none=None, min_len=0, max_len=None): - def func(value): - value = _ensure_list(value) - if len(value) < min_len: - raise ValueError("Parameter must have at least {0} element(s)".format(min_len)) - if max_len is not None and len(value) > max_len: - raise ValueError("Parameter must have at most {0} element(s)".format(max_len)) - return value - return _ArgFormat(func, ignore_none=ignore_none) - - @staticmethod - def as_fixed(args): - return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True) - - @staticmethod - def as_func(func, ignore_none=None): - return _ArgFormat(func, ignore_none=ignore_none) - - @staticmethod - def as_map(_map, default=None, ignore_none=None): - if default is None: - default = [] - return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) - - @staticmethod - def unpack_args(func): - @wraps(func) - def wrapper(v): - return func(*v) - return wrapper - - @staticmethod - def unpack_kwargs(func): - @wraps(func) - def wrapper(v): - return func(**v) - return wrapper - - @staticmethod - def stack(fmt): - @wraps(fmt) - def wrapper(*args, **kwargs): - new_func = fmt(ignore_none=True, *args, **kwargs) - - def stacking(value): - stack = [new_func(v) for v in value if v] - stack = [x for args in stack for x in args] - return stack - return _ArgFormat(stacking, ignore_none=True) - return wrapper - - class CmdRunner(object): """ Wrapper for ``AnsibleModule.run_command()``. 
@@ -215,8 +109,8 @@ class CmdRunner(object): arg_formats = {} self.arg_formats = {} for fmt_name, fmt in arg_formats.items(): - if not isinstance(fmt, _ArgFormat): - fmt = _Format.as_func(func=fmt, ignore_none=True) + if not cmd_runner_fmt.is_argformat(fmt): + fmt = cmd_runner_fmt.as_func(func=fmt, ignore_none=True) self.arg_formats[fmt_name] = fmt self.check_rc = check_rc if force_lang == "auto": @@ -350,6 +244,3 @@ class _CmdRunnerContext(object): def __exit__(self, exc_type, exc_val, exc_tb): return False - - -cmd_runner_fmt = _Format() diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py new file mode 100644 index 0000000000..bd6d00a15d --- /dev/null +++ b/plugins/module_utils/cmd_runner_fmt.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from functools import wraps + +from ansible.module_utils.common.collections import is_sequence + + +def _ensure_list(value): + return list(value) if is_sequence(value) else [value] + + +class _ArgFormat(object): + # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0 + def __init__(self, func, ignore_none=None, ignore_missing_value=False): + self.func = func + self.ignore_none = ignore_none + self.ignore_missing_value = ignore_missing_value + + # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0 + def __call__(self, value, ctx_ignore_none=True): + # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0 + ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none + if value is None and ignore_none: + return [] + f = self.func + return [str(x) for x in f(value)] + + def __str__(self): + return "".format( 
+ self.func, + self.ignore_none, + self.ignore_missing_value, + ) + + def __repr__(self): + return str(self) + + +def as_bool(args_true, args_false=None, ignore_none=None): + if args_false is not None: + if ignore_none is None: + ignore_none = False + else: + args_false = [] + return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none) + + +def as_bool_not(args): + return as_bool([], args, ignore_none=False) + + +def as_optval(arg, ignore_none=None): + return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none) + + +def as_opt_val(arg, ignore_none=None): + return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none) + + +def as_opt_eq_val(arg, ignore_none=None): + return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none) + + +def as_list(ignore_none=None, min_len=0, max_len=None): + def func(value): + value = _ensure_list(value) + if len(value) < min_len: + raise ValueError("Parameter must have at least {0} element(s)".format(min_len)) + if max_len is not None and len(value) > max_len: + raise ValueError("Parameter must have at most {0} element(s)".format(max_len)) + return value + return _ArgFormat(func, ignore_none=ignore_none) + + +def as_fixed(args): + return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True) + + +def as_func(func, ignore_none=None): + return _ArgFormat(func, ignore_none=ignore_none) + + +def as_map(_map, default=None, ignore_none=None): + if default is None: + default = [] + return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) + + +def unpack_args(func): + @wraps(func) + def wrapper(v): + return func(*v) + return wrapper + + +def unpack_kwargs(func): + @wraps(func) + def wrapper(v): + return func(**v) + return wrapper + + +def stack(fmt): + @wraps(fmt) + def wrapper(*args, **kwargs): + new_func = fmt(ignore_none=True, *args, 
**kwargs) + + def stacking(value): + stack = [new_func(v) for v in value if v] + stack = [x for args in stack for x in args] + return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper + + +def is_argformat(fmt): + return isinstance(fmt, _ArgFormat) From 7fc7af306c973c0f3a8d6900f16209b6555d307a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 4 Oct 2024 01:19:13 +1300 Subject: [PATCH 264/482] fix doc for cmd_runner_fmt.as_bool() (#8971) --- docs/docsite/rst/guide_cmdrunner.rst | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst index d491769c18..f7b70a86e1 100644 --- a/docs/docsite/rst/guide_cmdrunner.rst +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -186,8 +186,20 @@ In these descriptions ``value`` refers to the single parameter passed to the for +------------+--------------------+ | ``False`` | ``[]`` | +------------+--------------------+ - - Creation (two args): + - Creation (two args, ``None`` treated as ``False``): ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``["--dont-do-it"]`` | + +------------+----------------------+ + - Creation (two args, ``None`` is ignored): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)`` - Examples: +------------+----------------------+ | Value | Outcome | From d4fb6bf8a65afabe3576b49af906585425571ac3 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:37:18 +0300 Subject: [PATCH 265/482] nmcli: conn_reload param and up/down states (#8897) * Update nmcli module * Update nmcli state * Update 
test_nmcli * Add CHANGELOG fragment * PR Fixes * Fix DOCUMENTATION block --- .../8897-nmcli-add-reload-and-up-down.yml | 3 + plugins/modules/nmcli.py | 61 ++++++++++++++++++- tests/unit/plugins/modules/test_nmcli.py | 1 + 3 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml diff --git a/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml b/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml new file mode 100644 index 0000000000..68f481452c --- /dev/null +++ b/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897). + - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index e48183d049..e2803432a9 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -34,9 +34,11 @@ options: state: description: - Whether the device should exist or not, taking action if the state is different from what is stated. + - Using O(state=present) to create connection will automatically bring connection up. + - Using O(state=up) and O(state=down) will not modify connection with other parameters. These states have been added in community.general 9.5.0. type: str required: true - choices: [ absent, present ] + choices: [ absent, present, up, down ] autoconnect: description: - Whether the connection should start on boot. 
@@ -48,6 +50,13 @@ options: - The name used to call the connection. Pattern is [-][-]. type: str required: true + conn_reload: + description: + - Whether the connection should be reloaded if it was modified. + type: bool + required: false + default: false + version_added: 9.5.0 ifname: description: - The interface to bind the connection to. @@ -1309,6 +1318,25 @@ EXAMPLES = r''' type: ethernet state: present + - name: Change the property of a setting e.g. MTU and reload connection + community.general.nmcli: + conn_name: my-eth1 + mtu: 1500 + type: ethernet + state: present + conn_reload: true + + - name: Disable connection + community.general.nmcli: + conn_name: my-eth1 + state: down + + - name: Reload and enable connection + community.general.nmcli: + conn_name: my-eth1 + state: up + reload: true + - name: Add second ip4 address community.general.nmcli: conn_name: my-eth1 @@ -1581,6 +1609,7 @@ class Nmcli(object): self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] self.autoconnect = module.params['autoconnect'] self.conn_name = module.params['conn_name'] + self.conn_reload = module.params['conn_reload'] self.slave_type = module.params['slave_type'] self.master = module.params['master'] self.ifname = module.params['ifname'] @@ -2165,6 +2194,10 @@ class Nmcli(object): cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] return self.execute_command(cmd) + def reload_connection(self): + cmd = [self.nmcli_bin, 'con', 'reload'] + return self.execute_command(cmd) + def connection_update(self, nmcli_command): if nmcli_command == 'create': cmd = [self.nmcli_bin, 'con', 'add', 'type'] @@ -2431,8 +2464,9 @@ def main(): argument_spec=dict( ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), - state=dict(type='str', required=True, choices=['absent', 'present']), + state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']), conn_name=dict(type='str', required=True), 
+ conn_reload=dict(type='bool', default=False), master=dict(type='str'), slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port']), ifname=dict(type='str'), @@ -2639,6 +2673,8 @@ def main(): if module.check_mode: module.exit_json(changed=True, **result) (rc, out, err) = nmcli.modify_connection() + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() else: result['Exists'] = 'Connections already exist and no changes made' if module.check_mode: @@ -2650,6 +2686,27 @@ def main(): (rc, out, err) = nmcli.create_connection() if rc is not None and rc != 0: module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + elif nmcli.state == 'up': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.up_connection() + if rc != 0: + module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'down': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.down_connection() + if rc != 0: + module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + except NmcliModuleError as e: module.fail_json(name=nmcli.conn_name, msg=str(e)) diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py index 8c9c007ace..570b04d56f 100644 --- a/tests/unit/plugins/modules/test_nmcli.py +++ b/tests/unit/plugins/modules/test_nmcli.py @@ -4251,6 +4251,7 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): autoconnect=dict(type='bool', default=True), state=dict(type='str', required=True, choices=['absent', 'present']), conn_name=dict(type='str', required=True), + conn_reload=dict(type='bool', required=False, default=False), master=dict(type='str'), 
slave_type=dict(type=str, choices=['bond', 'bridge', 'team']), ifname=dict(type='str'), From 5d9a7ab2400badc11a236c86076affa4327acb72 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Thu, 3 Oct 2024 14:38:09 +0200 Subject: [PATCH 266/482] keycloak_user_federation: remove `lastSync` param from kc API responses (#8812) * remove `lastSync` param from kc API responses * add blank line to satisfy sanity check * add changelog fragment * fix NoneType error introduced by changed normalize func return value --- ...-federation-remove-lastSync-param-from-kc-responses.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml diff --git a/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml b/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml new file mode 100644 index 0000000000..82496d1083 --- /dev/null +++ b/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 06283a025e..0b3b610806 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -722,12 +722,15 @@ from copy import deepcopy def normalize_kc_comp(comp): - # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; - # to make change detection and diff more accurate we set it again in the kc responses if 'config' in comp: + # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; + # to make change detection and diff more accurate we set it again in the kc responses if 'krbPrincipalAttribute' not in comp['config']: comp['config']['krbPrincipalAttribute'] = [''] + # kc stores a timestamp of the last sync in `lastSync` to time the periodic sync, it is removed to minimize diff/changes + comp['config'].pop('lastSync', None) + def sanitize(comp): compcopy = deepcopy(comp) From 92df5e8fec42aedd60bfb84fad754bf1d0081e77 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:39:17 +0300 Subject: [PATCH 267/482] open_iscsi: Make targets optional for a portal login (#8719) * Make targets optional for a portal login * Add changelog * Fix check_rc variable * Fix idempotence * Fix linting * PR fixes * Linter fixes * PR fixes * Change variable name --- .../8719-openiscsi-add-multiple-targets.yaml | 2 + plugins/modules/open_iscsi.py | 94 +++++++++++++------ 2 files changed, 66 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml diff --git a/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml b/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml new file mode 100644 index 0000000000..16e523d83d --- /dev/null +++ 
b/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml @@ -0,0 +1,2 @@ +minor_changes: + - open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719). diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py index 163042cc42..df8a694a7e 100644 --- a/plugins/modules/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -45,6 +45,7 @@ options: login: description: - Whether the target node should be connected. + - When O(target) is omitted, will login to all available. type: bool aliases: [ state ] node_auth: @@ -101,7 +102,6 @@ options: type: bool default: false version_added: 4.1.0 - ''' EXAMPLES = r''' @@ -117,8 +117,7 @@ EXAMPLES = r''' discover: true ip: 10.1.2.3 -# NOTE: Only works if exactly one target is exported to the initiator -- name: Discover targets on portal and login to the one available +- name: Discover targets on portal and login to the ones available community.general.open_iscsi: portal: '{{ iscsi_target }}' login: true @@ -227,7 +226,7 @@ def target_loggedon(module, target, portal=None, port=None): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_login(module, target, portal=None, port=None): +def target_login(module, target, check_rc, portal=None, port=None): node_auth = module.params['node_auth'] node_user = module.params['node_user'] node_pass = module.params['node_pass'] @@ -240,21 +239,22 @@ def target_login(module, target, portal=None, port=None): ('node.session.auth.password', node_pass)] for (name, value) in params: cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=check_rc) if node_user_in: params = [('node.session.auth.username_in', node_user_in), ('node.session.auth.password_in', node_pass_in)] for (name, value) in params: cmd = '%s --mode node --targetname %s --op=update 
--name %s --value %s' % (iscsiadm_cmd, target, name, value) - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=check_rc) cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] if portal is not None and port is not None: cmd.append('--portal') cmd.append('%s:%s' % (portal, port)) - module.run_command(cmd, check_rc=True) + rc, out, err = module.run_command(cmd, check_rc=check_rc) + return rc def target_logout(module, target): @@ -339,7 +339,10 @@ def main(): ), required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], - required_if=[('discover', True, ['portal'])], + required_if=[ + ('discover', True, ['portal']), + ('auto_node_startup', True, ['target']), + ('auto_portal_startup', True, ['target'])], supports_check_mode=True, ) @@ -369,6 +372,8 @@ def main(): # return json dict result = {'changed': False} + login_to_all_nodes = False + check_rc = True if discover: if check: @@ -385,9 +390,10 @@ def main(): if login is not None or automatic is not None: if target is None: if len(nodes) > 1: - module.fail_json(msg="Need to specify a target") - else: - target = nodes[0] + # Disable strict return code checking if there are multiple targets + # That will allow to skip target where we have no rights to login + login_to_all_nodes = True + check_rc = False else: # check given target is in cache check_target = False @@ -402,26 +408,54 @@ def main(): result['nodes'] = nodes if login is not None: - loggedon = target_loggedon(module, target, portal, port) - if (login and loggedon) or (not login and not loggedon): - result['changed'] |= False - if login: - result['devicenodes'] = target_device_node(target) - elif not check: - if login: - target_login(module, target, portal, port) - # give udev some time - time.sleep(1) - result['devicenodes'] = target_device_node(target) - else: - target_logout(module, target) - result['changed'] |= True - result['connection_changed'] = True + if login_to_all_nodes: + 
result['devicenodes'] = [] + for index_target in nodes: + loggedon = target_loggedon(module, index_target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] += target_device_node(index_target) + elif not check: + if login: + login_result = target_login(module, index_target, check_rc, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] += target_device_node(index_target) + else: + target_logout(module, index_target) + # Check if there are multiple targets on a single portal and + # do not mark the task changed if host could not login to one of them + if len(nodes) > 1 and login_result == 24: + result['changed'] |= False + result['connection_changed'] = False + else: + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True else: - result['changed'] |= True - result['connection_changed'] = True + loggedon = target_loggedon(module, target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = target_device_node(target) + elif not check: + if login: + target_login(module, target, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] = target_device_node(target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True - if automatic is not None: + if automatic is not None and not login_to_all_nodes: isauto = target_isauto(module, target) if (automatic and isauto) or (not automatic and not isauto): result['changed'] |= False @@ -437,7 +471,7 @@ def main(): result['changed'] |= True result['automatic_changed'] = True - if automatic_portal is not None: + if automatic_portal is not None and not login_to_all_nodes: isauto = target_isauto(module, target, 
portal, port) if (automatic_portal and isauto) or (not automatic_portal and not isauto): result['changed'] |= False From e7ccbc2f18641047625aa360f6863e203aebde8b Mon Sep 17 00:00:00 2001 From: Julien Lecomte Date: Fri, 4 Oct 2024 09:10:43 +0200 Subject: [PATCH 268/482] Add gitlab group params (#8908) Add new gitlab_group parameters --- .../8908-add-gitlab-group-params.yml | 2 + plugins/modules/gitlab_group.py | 174 +++++++++++++++--- plugins/modules/gitlab_project.py | 4 +- 3 files changed, 152 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/8908-add-gitlab-group-params.yml diff --git a/changelogs/fragments/8908-add-gitlab-group-params.yml b/changelogs/fragments/8908-add-gitlab-group-params.yml new file mode 100644 index 0000000000..12de77b43a --- /dev/null +++ b/changelogs/fragments/8908-add-gitlab-group-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908). diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 74925430a1..f8db33360c 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -44,10 +44,24 @@ options: - This option is only used on creation, not for updates. type: path version_added: 4.2.0 + default_branch: + description: + - All merge requests and commits are made against this branch unless you specify a different one. + type: str + version_added: 9.5.0 description: description: - A description for the group. type: str + enabled_git_access_protocol: + description: + - V(all) means SSH and HTTP(S) is enabled. + - V(ssh) means only SSH is enabled. + - V(http) means only HTTP(S) is enabled. + - Only available for top level groups. + choices: ["all", "ssh", "http"] + type: str + version_added: 9.5.0 force_delete: description: - Force delete group even if projects in it. 
@@ -55,6 +69,27 @@ options: type: bool default: false version_added: 7.5.0 + lfs_enabled: + description: + - Projects in this group can use Git LFS. + type: bool + version_added: 9.5.0 + lock_duo_features_enabled: + description: + - Enforce GitLab Duo features for all subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + membership_lock: + description: + - Users cannot be added to projects in this group. + type: bool + version_added: 9.5.0 + mentions_disabled: + description: + - Group mentions are disabled. + type: bool + version_added: 9.5.0 name: description: - Name of the group you want to create. @@ -70,12 +105,40 @@ options: - The path of the group you want to create, this will be api_url/group_path - If not supplied, the group_name will be used. type: str + prevent_forking_outside_group: + description: + - Prevent forking outside of the group. + type: bool + version_added: 9.5.0 + prevent_sharing_groups_outside_hierarchy: + description: + - Members cannot invite groups outside of this group and its subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 project_creation_level: description: - Determine if developers can create projects in the group. choices: ["developer", "maintainer", "noone"] type: str version_added: 3.7.0 + request_access_enabled: + description: + - Users can request access (if visibility is public or internal). + type: bool + version_added: 9.5.0 + service_access_tokens_expiration_enforced: + description: + - Service account token expiration. + - Changes will not affect existing token expiration dates. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + share_with_group_lock: + description: + - Projects cannot be shared with other groups. + type: bool + version_added: 9.5.0 require_two_factor_authentication: description: - Require all users in this group to setup two-factor authentication. 
@@ -94,12 +157,25 @@ options: choices: ["maintainer", "owner"] type: str version_added: 3.7.0 + two_factor_grace_period: + description: + - Delay 2FA enforcement (hours). + type: str + version_added: 9.5.0 visibility: description: - Default visibility of the group choices: ["private", "internal", "public"] default: private type: str + wiki_access_level: + description: + - V(enabled) means everyone can access the wiki. + - V(private) means only members of this group can access the wiki. + - V(disabled) means group-level wiki is disabled. + choices: ["enabled", "private", "disabled"] + type: str + version_added: 9.5.0 ''' EXAMPLES = ''' @@ -202,23 +278,38 @@ class GitLabGroup(object): def create_or_update_group(self, name, parent, options): changed = False + payload = { + 'auto_devops_enabled': options['auto_devops_enabled'], + 'default_branch': options['default_branch'], + 'description': options['description'], + 'lfs_enabled': options['lfs_enabled'], + 'membership_lock': options['membership_lock'], + 'mentions_disabled': options['mentions_disabled'], + 'name': name, + 'path': options['path'], + 'prevent_forking_outside_group': options['prevent_forking_outside_group'], + 'project_creation_level': options['project_creation_level'], + 'request_access_enabled': options['request_access_enabled'], + 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'share_with_group_lock': options['share_with_group_lock'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], + 'wiki_access_level': options['wiki_access_level'], + } + if options.get('enabled_git_access_protocol') and parent is None: + payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] + if options.get('lock_duo_features_enabled') and parent is None: + payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] + if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: + 
payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] + if options.get('service_access_tokens_expiration_enforced') and parent is None: + payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] + if options.get('two_factor_grace_period'): + payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + # Because we have already call userExists in main() if self.group_object is None: - parent_id = self.get_group_id(parent) - - payload = { - 'auto_devops_enabled': options['auto_devops_enabled'], - 'name': name, - 'parent_id': parent_id, - 'path': options['path'], - 'project_creation_level': options['project_creation_level'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'visibility': options['visibility'], - } - if options.get('description'): - payload['description'] = options['description'] - if options.get('require_two_factor_authentication'): - payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] + payload['parent_id'] = self.get_group_id(parent) group = self.create_group(payload) # add avatar to group @@ -229,15 +320,7 @@ class GitLabGroup(object): self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) changed = True else: - changed, group = self.update_group(self.group_object, { - 'auto_devops_enabled': options['auto_devops_enabled'], - 'description': options['description'], - 'name': name, - 'project_creation_level': options['project_creation_level'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'visibility': options['visibility'], - }) + changed, group = self.update_group(self.group_object, payload) self.group_object = group if changed: @@ -324,16 +407,29 @@ def main(): argument_spec.update(dict( auto_devops_enabled=dict(type='bool'), 
avatar_path=dict(type='path'), + default_branch=dict(type='str'), description=dict(type='str'), + enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), force_delete=dict(type='bool', default=False), + lfs_enabled=dict(type='bool'), + lock_duo_features_enabled=dict(type='bool'), + membership_lock=dict(type='bool'), + mentions_disabled=dict(type='bool'), name=dict(type='str', required=True), parent=dict(type='str'), path=dict(type='str'), + prevent_forking_outside_group=dict(type='bool'), + prevent_sharing_groups_outside_hierarchy=dict(type='bool'), project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + request_access_enabled=dict(type='bool'), require_two_factor_authentication=dict(type='bool'), + service_access_tokens_expiration_enforced=dict(type='bool'), + share_with_group_lock=dict(type='bool'), state=dict(type='str', default="present", choices=["absent", "present"]), subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + two_factor_grace_period=dict(type='str'), visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type='str', choices=['enabled', 'private', 'disabled']), )) module = AnsibleModule( @@ -359,16 +455,29 @@ def main(): auto_devops_enabled = module.params['auto_devops_enabled'] avatar_path = module.params['avatar_path'] + default_branch = module.params['default_branch'] description = module.params['description'] + enabled_git_access_protocol = module.params['enabled_git_access_protocol'] force_delete = module.params['force_delete'] group_name = module.params['name'] group_path = module.params['path'] group_visibility = module.params['visibility'] + lfs_enabled = module.params['lfs_enabled'] + lock_duo_features_enabled = module.params['lock_duo_features_enabled'] + membership_lock = module.params['membership_lock'] + mentions_disabled = module.params['mentions_disabled'] parent_identifier = 
module.params['parent'] + prevent_forking_outside_group = module.params['prevent_forking_outside_group'] + prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] project_creation_level = module.params['project_creation_level'] + request_access_enabled = module.params['request_access_enabled'] require_two_factor_authentication = module.params['require_two_factor_authentication'] + service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] + share_with_group_lock = module.params['share_with_group_lock'] state = module.params['state'] subgroup_creation_level = module.params['subgroup_creation_level'] + two_factor_grace_period = module.params['two_factor_grace_period'] + wiki_access_level = module.params['wiki_access_level'] # Define default group_path based on group_name if group_path is None: @@ -380,7 +489,7 @@ def main(): if parent_identifier: parent_group = find_group(gitlab_instance, parent_identifier) if not parent_group: - module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") + module.fail_json(msg="Failed to create GitLab group: Parent group doesn't exist") group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) else: @@ -391,18 +500,31 @@ def main(): gitlab_group.delete_group(force=force_delete) module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) else: - module.exit_json(changed=False, msg="Group deleted or does not exists") + module.exit_json(changed=False, msg="Group deleted or does not exist") if state == 'present': if gitlab_group.create_or_update_group(group_name, parent_group, { "auto_devops_enabled": auto_devops_enabled, "avatar_path": avatar_path, + "default_branch": default_branch, "description": description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + "lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": 
membership_lock, + "mentions_disabled": mentions_disabled, "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, "visibility": group_visibility, + "wiki_access_level": wiki_access_level, }): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) else: diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index a85f2bd827..c5bfb4f21d 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -15,7 +15,7 @@ module: gitlab_project short_description: Creates/updates/deletes GitLab Projects description: - When the project does not exist in GitLab, it will be created. - - When the project does exists and O(state=absent), the project will be deleted. + - When the project does exist and O(state=absent), the project will be deleted. - When changes are made to the project, the project will be updated. 
author: - Werner Dijkerman (@dj-wasabi) @@ -716,7 +716,7 @@ def main(): if group_identifier: group = find_group(gitlab_instance, group_identifier) if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) namespace_id = group.id else: From fea0ffa5aa8d90a01616e596e03c6e78fb3f887c Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Sat, 5 Oct 2024 16:02:01 +0300 Subject: [PATCH 269/482] one_image/one_image_info: refactor (#8889) * Refactor one_image * Refactor one_image_info * Add examples one_image * Add CHANGELOG fragment * Add integration tests for one_image * Add integration tests for one_image_info * Update one_image DOC * Update one_image_info DOC * Update one_image DOC * Update one_image_info DOC * Fix f-strings for one_image * Update CHANGELOG fragment * PR fixes * PR fixes --- .../8889-refactor-one-image-modules.yml | 6 + plugins/module_utils/opennebula.py | 88 +++ plugins/modules/one_image.py | 608 +++++++++++------- plugins/modules/one_image_info.py | 395 +++++++----- tests/integration/targets/one_image/aliases | 7 + .../targets/one_image/tasks/main.yml | 210 ++++++ .../targets/one_image_info/aliases | 7 + .../targets/one_image_info/tasks/main.yml | 192 ++++++ 8 files changed, 1130 insertions(+), 383 deletions(-) create mode 100644 changelogs/fragments/8889-refactor-one-image-modules.yml create mode 100644 tests/integration/targets/one_image/aliases create mode 100644 tests/integration/targets/one_image/tasks/main.yml create mode 100644 tests/integration/targets/one_image_info/aliases create mode 100644 tests/integration/targets/one_image_info/tasks/main.yml diff --git a/changelogs/fragments/8889-refactor-one-image-modules.yml b/changelogs/fragments/8889-refactor-one-image-modules.yml new file mode 100644 index 0000000000..de552c17a6 --- /dev/null +++ 
b/changelogs/fragments/8889-refactor-one-image-modules.yml @@ -0,0 +1,6 @@ +minor_changes: + - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889). + - one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). + - one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). + - one_image - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889). + - one_image_info - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889). diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index 94732e4f7c..24833350c6 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -16,6 +16,7 @@ from ansible.module_utils.six import string_types from ansible.module_utils.basic import AnsibleModule +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] HAS_PYONE = True try: @@ -347,3 +348,90 @@ class OpenNebulaModule: result: the Ansible result """ raise NotImplementedError("Method requires implementation") + + def get_image_list_id(self, image, element): + """ + This is a helper function for get_image_info to iterate over a simple list of objects + """ + list_of_id = [] + + if element == 'VMS': + image_list = image.VMS + if element == 'CLONES': + image_list = image.CLONES + if element == 'APP_CLONES': + image_list = image.APP_CLONES + + for iter in image_list.ID: + list_of_id.append( + # These are optional so firstly check for presence + 
getattr(iter, 'ID', 'Null'), + ) + return list_of_id + + def get_image_snapshots_list(self, image): + """ + This is a helper function for get_image_info to iterate over a dictionary + """ + list_of_snapshots = [] + + for iter in image.SNAPSHOTS.SNAPSHOT: + list_of_snapshots.append({ + 'date': iter['DATE'], + 'parent': iter['PARENT'], + 'size': iter['SIZE'], + # These are optional so firstly check for presence + 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), + 'children': getattr(iter, 'CHILDREN', 'Null'), + 'active': getattr(iter, 'ACTIVE', 'Null'), + 'name': getattr(iter, 'NAME', 'Null'), + }) + return list_of_snapshots + + def get_image_info(self, image): + """ + This method is used by one_image and one_image_info modules to retrieve + information from XSD scheme of an image + Returns: a copy of the parameters that includes the resolved parameters. + """ + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + 'permissions': { + 'owner_u': image.PERMISSIONS.OWNER_U, + 'owner_m': image.PERMISSIONS.OWNER_M, + 'owner_a': image.PERMISSIONS.OWNER_A, + 'group_u': image.PERMISSIONS.GROUP_U, + 'group_m': image.PERMISSIONS.GROUP_M, + 'group_a': image.PERMISSIONS.GROUP_A, + 'other_u': image.PERMISSIONS.OTHER_U, + 'other_m': image.PERMISSIONS.OTHER_M, + 'other_a': image.PERMISSIONS.OTHER_A + }, + 'type': image.TYPE, + 'disk_type': image.DISK_TYPE, + 'persistent': image.PERSISTENT, + 'regtime': image.REGTIME, + 'source': image.SOURCE, + 'path': image.PATH, + 'fstype': getattr(image, 'FSTYPE', 'Null'), + 'size': image.SIZE, + 'cloning_ops': image.CLONING_OPS, + 'cloning_id': image.CLONING_ID, + 'target_snapshot': image.TARGET_SNAPSHOT, + 'datastore_id': image.DATASTORE_ID, + 'datastore': image.DATASTORE, + 'vms': self.get_image_list_id(image, 
'VMS'), + 'clones': self.get_image_list_id(image, 'CLONES'), + 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), + 'snapshots': self.get_image_snapshots_list(image), + 'template': image.TEMPLATE, + } + return info diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py index a0081a0fe0..86db3b0405 100644 --- a/plugins/modules/one_image.py +++ b/plugins/modules/one_image.py @@ -17,6 +17,7 @@ description: requirements: - pyone extends_documentation_fragment: + - community.general.opennebula - community.general.attributes attributes: check_mode: @@ -24,23 +25,6 @@ attributes: diff_mode: support: none options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the E(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the E(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the E(ONE_PASSWORD) environment variable is used. - type: str id: description: - A O(id) of the image you would like to manage. @@ -67,6 +51,11 @@ options: - A name that will be assigned to the existing or new image. - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'. type: str + persistent: + description: + - Whether the image should be persistent or non-persistent. 
+ type: bool + version_added: 9.5.0 author: - "Milan Ilic (@ilicmilan)" ''' @@ -92,6 +81,11 @@ EXAMPLES = ''' id: 37 enabled: false +- name: Make the IMAGE persistent + community.general.one_image: + id: 37 + persistent: true + - name: Enable the IMAGE by name community.general.one_image: name: bar-image @@ -114,300 +108,448 @@ RETURN = ''' id: description: image id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 153 name: description: image name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: app1 group_id: description: image's group id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 1 group_name: description: image's group name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: one-users owner_id: description: image's owner id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 143 owner_name: description: image's owner name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: ansible-test state: description: state of image instance type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: READY used: description: is image in use type: bool - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: true running_vms: description: count of running vms that use this image type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 7 +permissions: + description: The image's permissions. + type: dict + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. 
+ type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +type: + description: The image's type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +disk_type: + description: The image's format type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + returned: when O(state=present), O(state=cloned), or O(state=renamed) +path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +fstype: + description: The image's filesystem type. 
+ type: str + sample: ext4 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +target_snapshot: + description: The image's target snapshot. + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore: + description: The image's datastore name. + type: int + sample: image_datastore + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +vms: + description: The image's list of vm ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +clones: + description: The image's list of clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +snapshots: + description: The image's list of snapshots. 
+ type: list + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName ''' -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_image(module, client, predicate): - # Filter -2 means fetch all images user can Use - pool = client.imagepool.info(-2, -1, -1, -1) - - for image in pool.IMAGE: - if predicate(image): - return image - - return None - - -def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.NAME == image_name)) - - -def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.ID == image_id)) - - -def get_image_instance(module, client, requested_id, requested_name): - if requested_id: - return get_image_by_id(module, client, requested_id) - else: - return get_image_by_name(module, client, requested_name) +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } +class ImageModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), + enabled=dict(type='bool', required=False), + new_name=dict(type='str', required=False), + persistent=dict(type='bool', 
required=False), + ) + required_if = [ + ['state', 'renamed', ['id']] + ] + mutually_exclusive = [ + ['id', 'name'], + ] - return info + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_if=required_if) + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + persistent = params.get('persistent') -def wait_for_state(module, client, image_id, wait_timeout, state_predicate): - import time - start_time = time.time() + self.result = {} + + image = self.get_image_instance(id, name) + if not image and desired_state != 'absent': + # Using 'if id:' doesn't work properly when id=0 + if id is not None: + module.fail_json(msg="There is no image with id=" + str(id)) + elif name is not None: + module.fail_json(msg="There is no image with name=" + name) + + if desired_state == 'absent': + self.result = self.delete_image(image) + else: + if persistent is not None: + self.result = self.change_persistence(image, persistent) + if enabled is not None: + self.result = self.enable_image(image, enabled) + if desired_state == "cloned": + self.result = self.clone_image(image, new_name) + elif desired_state == "renamed": + self.result = self.rename_image(image, new_name) + + self.exit() + + def get_image(self, predicate): + # Filter -2 means fetch all images user can Use + pool = self.one.imagepool.info(-2, -1, -1, -1) + + for image in pool.IMAGE: + if predicate(image): + return image + + return None + + def get_image_by_name(self, image_name): + return self.get_image(lambda image: (image.NAME == image_name)) + + def get_image_by_id(self, image_id): + return self.get_image(lambda image: (image.ID == image_id)) + + def get_image_instance(self, requested_id, requested_name): + # Using 'if requested_id:' doesn't work properly when requested_id=0 + if 
requested_id is not None: + return self.get_image_by_id(requested_id) + else: + return self.get_image_by_name(requested_name) + + def wait_for_ready(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + image = self.one.image.info(image_id) + state = image.STATE + + if state in [IMAGE_STATES.index('ERROR')]: + self.module.fail_json(msg="Got an ERROR state: " + image.TEMPLATE['ERROR']) + + if state in [IMAGE_STATES.index('READY')]: + return True + + time.sleep(1) + self.module.fail_json(msg="Wait timeout has expired!") + + def wait_for_delete(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + # It might be already deleted by the time this function is called + try: + image = self.one.image.info(image_id) + except Exception: + check_image = self.get_image_instance(image_id, None) + if not check_image: + return True + + state = image.STATE + + if state in [IMAGE_STATES.index('DELETE')]: + return True + + time.sleep(1) + + self.module.fail_json(msg="Wait timeout has expired!") + + def enable_image(self, image, enable): + image = self.one.image.info(image.ID) + changed = False - while (time.time() - start_time) < wait_timeout: - image = client.image.info(image_id) state = image.STATE - if state_predicate(state): - return image + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + self.module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - time.sleep(1) + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True - module.fail_json(msg="Wait timeout has expired!") + if changed and not self.module.check_mode: + self.one.image.enable(image.ID, enable) - result = 
OpenNebulaModule.get_image_info(image) + result['changed'] = changed -def wait_for_ready(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) - - -def wait_for_delete(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) - - -def enable_image(module, client, image, enable): - image = client.image.info(image.ID) - changed = False - - state = image.STATE - - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: - if enable: - module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") - else: - module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): - changed = True - - if changed and not module.check_mode: - client.image.enable(image.ID, enable) - - result = get_image_info(image) - result['changed'] = changed - - return result - - -def clone_image(module, client, image, new_name): - if new_name is None: - new_name = "Copy of " + image.NAME - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - result = get_image_info(tmp_image) - result['changed'] = False return result - if image.STATE == IMAGE_STATES.index('DISABLED'): - module.fail_json(msg="Cannot clone DISABLED image") + def change_persistence(self, image, enable): + image = self.one.image.info(image.ID) + changed = False - if not module.check_mode: - new_id = client.image.clone(image.ID, new_name) - wait_for_ready(module, client, new_id) - image = client.image.info(new_id) + state = image.STATE - result = get_image_info(image) - result['changed'] = True + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if 
enable: + self.module.fail_json(msg="Cannot enable persistence for " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable persistence for " + IMAGE_STATES[state] + " image!") - return result + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + if changed and not self.module.check_mode: + self.one.image.persistent(image.ID, enable) -def rename_image(module, client, image, new_name): - if new_name is None: - module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + result = OpenNebulaModule.get_image_info(image) + result['changed'] = changed - if new_name == image.NAME: - result = get_image_info(image) - result['changed'] = False return result - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + def clone_image(self, image, new_name): + if new_name is None: + new_name = "Copy of " + image.NAME - if not module.check_mode: - client.image.rename(image.ID, new_name) + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + result = OpenNebulaModule.get_image_info(tmp_image) + result['changed'] = False + return result - result = get_image_info(image) - result['changed'] = True - return result + if image.STATE == IMAGE_STATES.index('DISABLED'): + self.module.fail_json(msg="Cannot clone DISABLED image") + if not self.module.check_mode: + new_id = self.one.image.clone(image.ID, new_name) + self.wait_for_ready(new_id) + image = self.one.image.info(new_id) -def delete_image(module, client, image): + result = OpenNebulaModule.get_image_info(image) + result['changed'] = True - if not image: - return {'changed': False} + return result - if image.RUNNING_VMS > 0: - module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") + def rename_image(self, image, new_name): + if new_name is None: + self.module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - if not module.check_mode: - client.image.delete(image.ID) - wait_for_delete(module, client, image.ID) + if new_name == image.NAME: + result = OpenNebulaModule.get_image_info(image) + result['changed'] = False + return result - return {'changed': True} + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + self.module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + if not self.module.check_mode: + self.one.image.rename(image.ID, new_name) -def get_connection_info(module): + result = OpenNebulaModule.get_image_info(image) + result['changed'] = True + return result - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') + def delete_image(self, image): + if not image: + return {'changed': False} - if not url: - url = os.environ.get('ONE_URL') + if image.RUNNING_VMS > 0: + self.module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") - if not username: - username = os.environ.get('ONE_USERNAME') + if not self.module.check_mode: + self.one.image.delete(image.ID) + self.wait_for_delete(image.ID) - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not (url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) + return {'changed': True} def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "id": {"required": False, "type": "int"}, - "name": {"required": False, "type": "str"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'cloned', 'renamed'], - "type": "str" - }, - "enabled": {"required": False, "type": "bool"}, - "new_name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['id', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - id = params.get('id') - name = params.get('name') - state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - result = {} - - if not id and state == 'renamed': - module.fail_json(msg="Option 'id' is required when the state is 'renamed'") - - image = get_image_instance(module, client, id, name) - if not image and state != 'absent': - if id: - module.fail_json(msg="There is no image with id=" + str(id)) - else: - module.fail_json(msg="There is no 
image with name=" + name) - - if state == 'absent': - result = delete_image(module, client, image) - else: - result = get_image_info(image) - changed = False - result['changed'] = False - - if enabled is not None: - result = enable_image(module, client, image, enabled) - if state == "cloned": - result = clone_image(module, client, image, new_name) - elif state == "renamed": - result = rename_image(module, client, image, new_name) - - changed = changed or result['changed'] - result['changed'] = changed - - module.exit_json(**result) + ImageModule().run_module() if __name__ == '__main__': diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py index c9d7c4035f..2ad0f3c493 100644 --- a/plugins/modules/one_image_info.py +++ b/plugins/modules/one_image_info.py @@ -17,29 +17,14 @@ description: requirements: - pyone extends_documentation_fragment: + - community.general.opennebula - community.general.attributes - community.general.attributes.info_module options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the E(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the E(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the E(ONE_PASSWORD) environment variable is used. - type: str ids: description: - A list of images ids whose facts you want to gather. + - Module can use integers too. 
aliases: ['id'] type: list elements: str @@ -66,9 +51,16 @@ EXAMPLES = ''' msg: result - name: Gather facts about an image using ID + community.general.one_image_info: + ids: 123 + +- name: Gather facts about an image using list of ID community.general.one_image_info: ids: - 123 + - 456 + - 789 + - 0 - name: Gather facts about an image using the name community.general.one_image_info: @@ -93,182 +85,285 @@ images: returned: success contains: id: - description: image id + description: The image's id. type: int sample: 153 name: - description: image name + description: The image's name. type: str sample: app1 group_id: - description: image's group id + description: The image's group id type: int sample: 1 group_name: - description: image's group name + description: The image's group name. type: str sample: one-users owner_id: - description: image's owner id + description: The image's owner id. type: int sample: 143 owner_name: - description: image's owner name + description: The image's owner name. type: str sample: ansible-test state: - description: state of image instance + description: The image's state. type: str sample: READY used: - description: is image in use + description: The image's usage status. type: bool sample: true running_vms: - description: count of running vms that use this image + description: The image's count of running vms that use this image. type: int sample: 7 + permissions: + description: The image's permissions. + type: dict + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. 
+ type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 + type: + description: The image's type. + type: int + sample: 0 + version_added: 9.5.0 + disk_type: + description: The image's format type. + type: int + sample: 0 + version_added: 9.5.0 + persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + version_added: 9.5.0 + source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + version_added: 9.5.0 + path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + version_added: 9.5.0 + fstype: + description: The image's filesystem type. + type: str + sample: ext4 + version_added: 9.5.0 + size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + version_added: 9.5.0 + cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + version_added: 9.5.0 + cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + version_added: 9.5.0 + target_snapshot: + description: The image's target snapshot. + type: int + sample: 1 + version_added: 9.5.0 + datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + version_added: 9.5.0 + datastore: + description: The image's datastore name. + type: int + sample: image_datastore + version_added: 9.5.0 + vms: + description: The image's list of vm ID's. 
+ type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + clones: + description: The image's list of clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + snapshots: + description: The image's list of snapshots. + type: list + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName ''' -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_all_images(client): - pool = client.imagepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all images user can Use - - return pool +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - return info +class ImageInfoModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + ids=dict(type='list', aliases=['id'], elements='str', required=False), + name=dict(type='str', required=False), + ) + mutually_exclusive = [ + ['ids', 'name'], + ] + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) -def get_images_by_ids(module, client, ids): - images = [] - pool = get_all_images(client) + def run(self, one, module, result): + params = module.params + ids = params.get('ids') + name = 
params.get('name') - for image in pool.IMAGE: - if str(image.ID) in ids: - images.append(image) - ids.remove(str(image.ID)) - if len(ids) == 0: + if ids: + images = self.get_images_by_ids(ids) + elif name: + images = self.get_images_by_name(name) + else: + images = self.get_all_images().IMAGE + + self.result = { + 'images': [OpenNebulaModule.get_image_info(image) for image in images] + } + + self.exit() + + def get_all_images(self): + pool = self.one.imagepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all images user can Use + + return pool + + def get_images_by_ids(self, ids): + images = [] + pool = self.get_all_images() + + for image in pool.IMAGE: + if str(image.ID) in ids: + images.append(image) + ids.remove(str(image.ID)) + if len(ids) == 0: + break + + if len(ids) > 0: + self.module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + + return images + + def get_images_by_name(self, name_pattern): + images = [] + pattern = None + + pool = self.get_all_images() + + if name_pattern.startswith('~'): + import re + if name_pattern[1] == '*': + pattern = re.compile(name_pattern[2:], re.IGNORECASE) + else: + pattern = re.compile(name_pattern[1:]) + + for image in pool.IMAGE: + if pattern is not None: + if pattern.match(image.NAME): + images.append(image) + elif name_pattern == image.NAME: + images.append(image) break - if len(ids) > 0: - module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + # if the specific name is indicated + if pattern is None and len(images) == 0: + self.module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - return images - - -def get_images_by_name(module, client, name_pattern): - - images = [] - pattern = None - - pool = get_all_images(client) - - if name_pattern.startswith('~'): - import re - if name_pattern[1] == '*': - pattern = re.compile(name_pattern[2:], re.IGNORECASE) - else: - pattern 
= re.compile(name_pattern[1:]) - - for image in pool.IMAGE: - if pattern is not None: - if pattern.match(image.NAME): - images.append(image) - elif name_pattern == image.NAME: - images.append(image) - break - - # if the specific name is indicated - if pattern is None and len(images) == 0: - module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - - return images - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not (url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) + return images def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, - "name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['ids', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - ids = params.get('ids') - name = params.get('name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - if ids: - images = get_images_by_ids(module, client, ids) - elif name: - images = get_images_by_name(module, client, name) - else: - images = get_all_images(client).IMAGE - - 
result = { - 'images': [get_image_info(image) for image in images], - } - - module.exit_json(**result) + ImageInfoModule().run_module() if __name__ == '__main__': diff --git a/tests/integration/targets/one_image/aliases b/tests/integration/targets/one_image/aliases new file mode 100644 index 0000000000..100ba0f979 --- /dev/null +++ b/tests/integration/targets/one_image/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI! diff --git a/tests/integration/targets/one_image/tasks/main.yml b/tests/integration/targets/one_image/tasks/main.yml new file mode 100644 index 0000000000..c8736d73d8 --- /dev/null +++ b/tests/integration/targets/one_image/tasks/main.yml @@ -0,0 +1,210 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Checks for existence +- name: Make sure image is present by ID + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: present + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Make sure image is present by ID + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: my_image + state: present + register: result 
+ +- name: Assert that image is present + assert: + that: + - result is not changed + +# Updating an image +- name: Clone image without name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: cloned + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Clone image with name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: renamed + new_name: new_image + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Disable image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: false + register: result + +- name: Assert that network is disabled + assert: + that: + - result is changed + +- name: Enable image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: true + register: result + +- name: Assert that network is enabled + assert: + that: + - result is changed + +- name: Make image persistent + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: true + register: result + +- name: Assert that network is persistent + assert: + that: + - result is changed + +- name: Make image non-persistent + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + register: result + +- name: Assert that network is non-persistent + assert: + that: + - result is changed + +# Testing idempotence using the same tasks +- name: Make image non-persistent + one_image: + api_url: "{{ 
opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + enabled: true + register: result + +- name: Assert that network not changed + assert: + that: + - result is not changed + +# Delete images +- name: Deleting non-existing image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 228 + state: absent + register: result + +- name: Assert that network not changed + assert: + that: + - result is not changed + +- name: Delete an existing image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: absent + register: result + +- name: Assert that image was deleted + assert: + that: + - result is changed + +# Trying to run with wrong arguments +- name: Try to use name and ID at the same time + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + name: name + register: result + ignore_errors: true + +- name: Assert that task failed + assert: + that: + - result is failed + +- name: Try to rename image without specifying new name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: rename + register: result + ignore_errors: true + +- name: Assert that task failed + assert: + that: + - result is failed + +- name: Try to rename image without specifying new name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: rename + register: result + ignore_errors: true diff --git a/tests/integration/targets/one_image_info/aliases b/tests/integration/targets/one_image_info/aliases new file mode 100644 index 0000000000..100ba0f979 --- 
/dev/null +++ b/tests/integration/targets/one_image_info/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI! diff --git a/tests/integration/targets/one_image_info/tasks/main.yml b/tests/integration/targets/one_image_info/tasks/main.yml new file mode 100644 index 0000000000..fede116241 --- /dev/null +++ b/tests/integration/targets/one_image_info/tasks/main.yml @@ -0,0 +1,192 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Checks for existence +- name: Get info by ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Get info by list of ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + ids: + - 2 + - 2 + - 8 + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Get info by list of ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: somename + register: result + +- name: Assert that image is present + assert: 
+ that: + - result is not changed + +- name: Gather all info + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +- name: Gather info by regex + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: '~my_image-[0-9].*' + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +- name: Gather info by regex and ignore upper/lower cases + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: '~*my_image-[0-9].*' + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +# Updating an image +- name: Clone image without name + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: cloned + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Clone image with name + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: renamed + new_name: new_image + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Disable image + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: false + register: result + +- name: Assert that network is disabled + assert: + that: + - result is changed + +- name: Enable image + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + 
api_password: "{{ opennebula_password }}" + id: 0 + enabled: true + register: result + +- name: Assert that network is enabled + assert: + that: + - result is changed + +- name: Make image persistent + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: true + register: result + +- name: Assert that network is persistent + assert: + that: + - result is changed + +- name: Make image non-persistent + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + register: result + +- name: Assert that network is non-persistent + assert: + that: + - result is changed + +# Testing errors +- name: Try to use name and ID a the same time + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + name: somename + register: result + ignore_errors: true + +- name: Assert that network not changed + assert: + that: + - result is failed From 8610223d03d46c02b4190661a62d1977ba8f4a89 Mon Sep 17 00:00:00 2001 From: JaegerMaKn Date: Sat, 5 Oct 2024 15:03:04 +0200 Subject: [PATCH 270/482] dig lookup plugin: Fix using only last nameserver specified (#8970) * dig plugin: Fix using only last nameserver given Currently, when specifying multiple nameservers either using multiple `@ns.example.com` arguments or by specifying multiple nameservers in a single argument (@ns1.example.com,ns2.example.com), due to a bug only the very last nameserver that is specified is actually used. This is because for every iteration of the for ns in nsset loop, the local list of nameservers is cleared and after adding the currently processed nameserver entry, the whole `nameservers` list of the Resolver instance is overridden with that new list with just one element. 
And as far as I can see, when setting that `nameserver` property, the dnspython library actually overrides the existing list and doesn't do some trickery to append the new nameservers or something like that. Therefore, the assignment of the `nameservers` property of the Resolver is moved after the argument processing so all nameservers are added and then collectively written to the `nameservers` property of the Resolver. * Add CHANGELOG fragment --- changelogs/fragments/8970-fix-dig-multi-nameservers.yml | 2 ++ plugins/lookup/dig.py | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8970-fix-dig-multi-nameservers.yml diff --git a/changelogs/fragments/8970-fix-dig-multi-nameservers.yml b/changelogs/fragments/8970-fix-dig-multi-nameservers.yml new file mode 100644 index 0000000000..e7f93853e9 --- /dev/null +++ b/changelogs/fragments/8970-fix-dig-multi-nameservers.yml @@ -0,0 +1,2 @@ +bugfixes: + - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). \ No newline at end of file diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 5be57cec78..7716331825 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -330,6 +330,7 @@ class LookupModule(LookupBase): myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) domains = [] + nameservers = [] qtype = self.get_option('qtype') flat = self.get_option('flat') fail_on_error = self.get_option('fail_on_error') @@ -345,7 +346,6 @@ class LookupModule(LookupBase): if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. nsset = t[1:].split(',') for ns in nsset: - nameservers = [] # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. If that # fails we bail out. 
@@ -358,7 +358,6 @@ class LookupModule(LookupBase): nameservers.append(nsaddr) except Exception as e: raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers continue if '=' in t: try: @@ -397,6 +396,9 @@ class LookupModule(LookupBase): # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + if len(nameservers) > 0: + myres.nameservers = nameservers + if qtype.upper() == 'PTR': reversed_domains = [] for domain in domains: From 29a2df8e6b6b52f28aa99474f753cb5e7d343603 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 7 Oct 2024 21:56:37 +0200 Subject: [PATCH 271/482] udm_user, homectl: use legacycrypt on Python 3.13+ (#8987) Use legacycrypt on Python 3.13+. --- changelogs/fragments/8987-legacycrypt.yml | 3 +++ plugins/modules/homectl.py | 23 ++++++++++++----- plugins/modules/udm_user.py | 25 +++++++++++++------ .../targets/homectl/tasks/main.yml | 5 ++++ 4 files changed, 43 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/8987-legacycrypt.yml diff --git a/changelogs/fragments/8987-legacycrypt.yml b/changelogs/fragments/8987-legacycrypt.yml new file mode 100644 index 0000000000..ce955f3564 --- /dev/null +++ b/changelogs/fragments/8987-legacycrypt.yml @@ -0,0 +1,3 @@ +bugfixes: + - "homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987)." + - "udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987)." diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index 7751651c85..58176f3389 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -18,11 +18,11 @@ version_added: 4.4.0 description: - Manages a user's home directory managed by systemd-homed. 
notes: - - This module does B(not) work with Python 3.13 or newer. It uses the deprecated L(crypt Python module, - https://docs.python.org/3.12/library/crypt.html) from the Python standard library, which was removed - from Python 3.13. + - This module requires the deprecated L(crypt Python module, + https://docs.python.org/3.12/library/crypt.html) library which was removed from Python 3.13. + For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). requirements: - - Python 3.12 or earlier + - legacycrypt (on Python 3.13 or newer) extends_documentation_fragment: - community.general.attributes attributes: @@ -284,6 +284,17 @@ else: HAS_CRYPT = True CRYPT_IMPORT_ERROR = None +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = None + class Homectl(object): '''#TODO DOC STRINGS''' @@ -606,9 +617,9 @@ def main(): ] ) - if not HAS_CRYPT: + if not HAS_CRYPT and not HAS_LEGACYCRYPT: module.fail_json( - msg=missing_required_lib('crypt (part of Python 3.13 standard library)'), + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), exception=CRYPT_IMPORT_ERROR, ) diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index 5a2e090497..5257a22028 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -21,11 +21,11 @@ description: server (UCS). It uses the python API of the UCS to create a new object or edit it." notes: - - This module does B(not) work with Python 3.13 or newer. It uses the deprecated L(crypt Python module, - https://docs.python.org/3.12/library/crypt.html) from the Python standard library, which was removed - from Python 3.13. 
+ - This module requires the deprecated L(crypt Python module, + https://docs.python.org/3.12/library/crypt.html) library which was removed from Python 3.13. + For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). requirements: - - Python 3.12 or earlier + - legacycrypt (on Python 3.13 or newer) extends_documentation_fragment: - community.general.attributes attributes: @@ -350,6 +350,17 @@ else: HAS_CRYPT = True CRYPT_IMPORT_ERROR = None +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = None + def main(): expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") @@ -467,10 +478,10 @@ def main(): ]) ) - if not HAS_CRYPT: + if not HAS_CRYPT and not HAS_LEGACYCRYPT: module.fail_json( - msg=missing_required_lib('crypt (part of Python 3.13 standard library)'), - exception=CRYPT_IMPORT_ERROR, + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + exception=LEGACYCRYPT_IMPORT_ERROR, ) username = module.params['username'] diff --git a/tests/integration/targets/homectl/tasks/main.yml b/tests/integration/targets/homectl/tasks/main.yml index 93c1089b47..aa924293e3 100644 --- a/tests/integration/targets/homectl/tasks/main.yml +++ b/tests/integration/targets/homectl/tasks/main.yml @@ -15,6 +15,11 @@ ignore_errors: true - block: + - name: Install legacycrypt on Python 3.13+ + pip: + name: legacycrypt + when: ansible_python_version is version("3.13", ">=") + - name: Check and start systemd-homed service service: name: systemd-homed.service From 5b4f41748d02cd8695b017c670b3c37a5a189828 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:00:26 +1300 Subject: [PATCH 272/482] Update docs with references to man pages (#8983) * update docs 
with references to man pages * reformat module docs * gconftool2/_info: docs adjustments --- plugins/doc_fragments/pipx.py | 7 +- plugins/modules/ansible_galaxy_install.py | 211 +++++++++-------- plugins/modules/cpanm.py | 74 +++--- plugins/modules/gconftool2.py | 91 ++++--- plugins/modules/gconftool2_info.py | 42 ++-- plugins/modules/gio_mime.py | 70 +++--- plugins/modules/mksysb.py | 46 ++-- plugins/modules/pipx.py | 274 +++++++++++----------- plugins/modules/pipx_info.py | 77 +++--- plugins/modules/xfconf.py | 3 +- 10 files changed, 461 insertions(+), 434 deletions(-) diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py index 112695f24f..52593f24f3 100644 --- a/plugins/doc_fragments/pipx.py +++ b/plugins/doc_fragments/pipx.py @@ -33,5 +33,10 @@ notes: - > This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using the R(environment Ansible keyword, playbooks_environment). - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). + +seealso: + - name: C(pipx) command manual page + description: Manual page for the command. + link: https://pipx.pypa.io/latest/docs/ + ''' diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index b0f3aeb5da..62de70bb63 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -9,23 +9,29 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ +--- module: ansible_galaxy_install author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Install Ansible roles or collections using ansible-galaxy version_added: 3.5.0 description: - - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +- This module allows the installation of Ansible collections or roles using C(ansible-galaxy). 
notes: - - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. - - > - The module will try and run using the C(C.UTF-8) locale. - If that fails, it will try C(en_US.UTF-8). - If that one also fails, the module will fail. +- Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. +- > + The module will try and run using the C(C.UTF-8) locale. + If that fails, it will try C(en_US.UTF-8). + If that one also fails, the module will fail. +seealso: +- name: C(ansible-galaxy) command manual page + description: Manual page for the command. + link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html + requirements: - - ansible-core 2.11 or newer +- ansible-core 2.11 or newer extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: none @@ -34,62 +40,63 @@ attributes: options: state: description: - - > - If O(state=present) then the collection or role will be installed. - Note that the collections and roles are not updated with this option. - - > - Currently the O(state=latest) is ignored unless O(type=collection), and it will - ensure the collection is installed and updated to the latest available version. - - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + - > + If O(state=present) then the collection or role will be installed. + Note that the collections and roles are not updated with this option. + - > + Currently the O(state=latest) is ignored unless O(type=collection), and it will + ensure the collection is installed and updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). type: str - choices: [ present, latest ] + choices: [present, latest] default: present version_added: 9.1.0 type: description: - - The type of installation performed by C(ansible-galaxy). 
- - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. - - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices." + - The type of installation performed by C(ansible-galaxy). + - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. + - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices." type: str choices: [collection, role, both] required: true name: description: - - Name of the collection or role being installed. - - > - Versions can be specified with C(ansible-galaxy) usual formats. - For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0). - - O(name) and O(requirements_file) are mutually exclusive. + - Name of the collection or role being installed. + - > + Versions can be specified with C(ansible-galaxy) usual formats. + For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0). + - O(name) and O(requirements_file) are mutually exclusive. type: str requirements_file: description: - - Path to a file containing a list of requirements to be installed. - - It works for O(type) equals to V(collection) and V(role). - - O(name) and O(requirements_file) are mutually exclusive. + - Path to a file containing a list of requirements to be installed. + - It works for O(type) equals to V(collection) and V(role). + - O(name) and O(requirements_file) are mutually exclusive. type: path dest: description: - - The path to the directory containing your collections or roles, according to the value of O(type). - - > - Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) - contains both roles and collections and O(dest) is specified. 
+ - The path to the directory containing your collections or roles, according to the value of O(type). + - > + Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) + contains both roles and collections and O(dest) is specified. type: path no_deps: description: - - Refrain from installing dependencies. + - Refrain from installing dependencies. version_added: 4.5.0 type: bool default: false force: description: - - Force overwriting existing roles and/or collections. - - It can be used for upgrading, but the module output will always report C(changed=true). - - Using O(force=true) is mandatory when downgrading. + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output will always report C(changed=true). + - Using O(force=true) is mandatory when downgrading. type: bool default: false """ EXAMPLES = """ +--- - name: Install collection community.network community.general.ansible_galaxy_install: type: collection @@ -111,76 +118,76 @@ EXAMPLES = """ type: collection name: community.network:3.0.2 force: true - """ RETURN = """ - type: - description: The value of the O(type) parameter. - type: str - returned: always - name: - description: The value of the O(name) parameter. - type: str - returned: always - dest: - description: The value of the O(dest) parameter. - type: str - returned: always - requirements_file: - description: The value of the O(requirements_file) parameter. - type: str - returned: always - force: - description: The value of the O(force) parameter. - type: bool - returned: always - installed_roles: - description: - - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. - - If O(name) is specified, returns that role name and the version installed per path. - type: dict - returned: always when installing roles - contains: - "": - description: Roles and versions for that path. 
- type: dict - sample: - /home/user42/.ansible/roles: - ansistrano.deploy: 3.9.0 - baztian.xfce: v0.0.3 - /custom/ansible/roles: - ansistrano.deploy: 3.8.0 - installed_collections: - description: - - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path. - - If O(name) is specified, returns that collection name and the version installed per path. - type: dict - returned: always when installing collections - contains: - "": - description: Collections and versions for that path - type: dict - sample: - /home/az/.ansible/collections/ansible_collections: - community.docker: 1.6.0 - community.general: 3.0.2 - /custom/ansible/ansible_collections: - community.general: 3.1.0 - new_collections: - description: New collections installed by this module. - returned: success - type: dict - sample: - community.general: 3.1.0 - community.docker: 1.6.1 - new_roles: - description: New roles installed by this module. - returned: success - type: dict - sample: - ansistrano.deploy: 3.8.0 +--- +type: + description: The value of the O(type) parameter. + type: str + returned: always +name: + description: The value of the O(name) parameter. + type: str + returned: always +dest: + description: The value of the O(dest) parameter. + type: str + returned: always +requirements_file: + description: The value of the O(requirements_file) parameter. + type: str + returned: always +force: + description: The value of the O(force) parameter. + type: bool + returned: always +installed_roles: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If O(name) is specified, returns that role name and the version installed per path. + type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. 
+ type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 +installed_collections: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If O(name) is specified, returns that collection name and the version installed per path. + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 +new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 +new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 """ import re diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 3beae895dc..25489170dd 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -10,14 +10,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: cpanm short_description: Manages Perl library dependencies description: - - Manage Perl library dependencies using cpanminus. +- Manage Perl library dependencies using cpanminus. extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: none @@ -27,76 +27,82 @@ options: name: type: str description: - - The Perl library to install. Valid values change according to the O(mode), see notes for more details. - - Note that for installing from a local path the parameter O(from_path) should be used. + - The Perl library to install. 
Valid values change according to the O(mode), see notes for more details. + - Note that for installing from a local path the parameter O(from_path) should be used. aliases: [pkg] from_path: type: path description: - - The local directory or C(tar.gz) file to install from. + - The local directory or C(tar.gz) file to install from. notest: description: - - Do not run unit tests. + - Do not run unit tests. type: bool default: false locallib: description: - - Specify the install base to install modules. + - Specify the install base to install modules. type: path mirror: description: - - Specifies the base URL for the CPAN mirror to use. + - Specifies the base URL for the CPAN mirror to use. type: str mirror_only: description: - - Use the mirror's index file instead of the CPAN Meta DB. + - Use the mirror's index file instead of the CPAN Meta DB. type: bool default: false installdeps: description: - - Only install dependencies. + - Only install dependencies. type: bool default: false version: description: - - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted. + - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted. type: str executable: description: - - Override the path to the cpanm executable. + - Override the path to the cpanm executable. type: path mode: description: - - Controls the module behavior. See notes below for more details. - - The default changed from V(compatibility) to V(new) in community.general 9.0.0. + - Controls the module behavior. See notes below for more details. + - The default changed from V(compatibility) to V(new) in community.general 9.0.0. type: str choices: [compatibility, new] default: new version_added: 3.0.0 name_check: description: - - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified). 
+ - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified). type: str version_added: 3.0.0 notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. - - "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)." - - > - O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. - This was the default mode before community.general 9.0.0. - O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) - when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. - C(cpanm) version specifiers do not work in this mode. - - > - O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. - This is the default mode from community.general 9.0.0 onwards. -author: - - "Franck Cuny (@fcuny)" - - "Alexei Znamensky (@russoz)" -''' +- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. +- "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)." +- > + O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. + This was the default mode before community.general 9.0.0. + O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) + when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. 
+ C(cpanm) version specifiers do not work in this mode. +- > + O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, + a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. + This is the default mode from community.general 9.0.0 onwards. -EXAMPLES = ''' +seealso: +- name: C(cpanm) command manual page + description: Manual page for the command. + link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm +author: +- "Franck Cuny (@fcuny)" +- "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = """ +--- - name: Install Dancer perl package community.general.cpanm: name: Dancer @@ -134,7 +140,7 @@ EXAMPLES = ''' community.general.cpanm: name: Dancer version: '1.0' -''' +""" import os diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index deae8a2f16..7cf9a92c44 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -9,16 +9,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: gconftool2 author: - - Kenneth D. Evensen (@kevensen) +- Kenneth D. Evensen (@kevensen) short_description: Edit GNOME Configurations description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. +- This module allows for the manipulation of GNOME 2 Configuration via gconftool-2. Please see the gconftool-2(1) man pages for more details. +seealso: +- name: C(gconftool-2) command manual page + description: Manual page for the command. 
+ link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: full @@ -28,42 +33,36 @@ options: key: type: str description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. See man gconftool-2(1). + - A GConf preference key is an element in the GConf repository that corresponds to an application preference. required: true value: type: str description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. - This is ignored unless O(state=present). See man gconftool-2(1). + - Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is ignored unless O(state=present). value_type: type: str description: - - The type of value being set. - This is ignored unless O(state=present). See man gconftool-2(1). - choices: [ bool, float, int, string ] + - The type of value being set. This is ignored unless O(state=present). + choices: [bool, float, int, string] state: type: str description: - The action to take upon the key/value. required: true - choices: [ absent, present ] + choices: [absent, present] config_source: type: str description: - Specify a configuration source to use rather than the default path. - See man gconftool-2(1). direct: description: - - Access the config database directly, bypassing server. If O(direct) is - specified then the O(config_source) must be specified as well. - See man gconftool-2(1). + - Access the config database directly, bypassing server. If O(direct) is specified then the O(config_source) must be specified as well. 
type: bool default: false -''' +""" EXAMPLES = """ +--- - name: Change the widget font to "Serif 12" community.general.gconftool2: key: "/desktop/gnome/interface/font_name" @@ -71,33 +70,33 @@ EXAMPLES = """ value: "Serif 12" """ -RETURN = ''' - key: - description: The key specified in the module parameters. - returned: success - type: str - sample: /desktop/gnome/interface/font_name - value_type: - description: The type of the value that was changed. - returned: success - type: str - sample: string - value: - description: - - The value of the preference key after executing the module or V(null) if key is removed. - - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. - returned: success - type: str - sample: "Serif 12" - previous_value: - description: - - The value of the preference key before executing the module. - - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. - returned: success - type: str - sample: "Serif 12" -... -''' +RETURN = """ +--- +key: + description: The key specified in the module parameters. + returned: success + type: str + sample: /desktop/gnome/interface/font_name +value_type: + description: The type of the value that was changed. + returned: success + type: str + sample: string +value: + description: + - The value of the preference key after executing the module or V(null) if key is removed. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +previous_value: + description: + - The value of the preference key before executing the module. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. 
+ returned: success + type: str + sample: "Serif 12" +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index f66e2da8f7..ebe2121ad1 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -7,46 +7,50 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: gconftool2_info author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Retrieve GConf configurations version_added: 5.1.0 description: - - This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2). +- This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2). extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module +- community.general.attributes +- community.general.attributes.info_module options: key: description: - The key name for an element in the GConf database. type: str required: true -notes: - - See man gconftool-2(1) for more details. seealso: - - name: gconf repository (archived) - description: Git repository for the project. It is an archived project, so the repository is read-only. - link: https://gitlab.gnome.org/Archive/gconf -''' +- name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en +- name: gconf repository (archived) + description: Git repository for the project. It is an archived project, so the repository is read-only. 
+ link: https://gitlab.gnome.org/Archive/gconf +""" EXAMPLES = """ +--- - name: Get value for a certain key in the database. community.general.gconftool2_info: key: /desktop/gnome/background/picture_filename register: result """ -RETURN = ''' - value: - description: - - The value of the property. - returned: success - type: str - sample: Monospace 10 -''' +RETURN = """ +--- +value: + description: + - The value of the property. + returned: success + type: str + sample: Monospace 10 +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index bb1ef6ebe3..20ccb22329 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -7,16 +7,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: gio_mime author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Set default handler for MIME type, for applications using Gnome GIO version_added: 7.5.0 description: - - This module allows configuring the default handler for a specific MIME type, to be used by applications built with th Gnome GIO API. +- This module allows configuring the default handler for a specific MIME type, to be used by applications built with th Gnome GIO API. extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: full @@ -25,24 +26,28 @@ attributes: options: mime_type: description: - - MIME type for which a default handler will be set. + - MIME type for which a default handler will be set. type: str required: true handler: description: - - Default handler will be set for the MIME type. + - Default handler will be set for the MIME type. 
type: str required: true notes: - - This module is a thin wrapper around the C(gio mime) command (and subcommand). - - See man gio(1) for more details. +- This module is a thin wrapper around the C(gio mime) command (and subcommand). +- See man gio(1) for more details. seealso: - - name: GIO Documentation - description: Reference documentation for the GIO API.. - link: https://docs.gtk.org/gio/ -''' +- name: C(gio) command manual page + description: Manual page for the command. + link: https://man.archlinux.org/man/gio.1 +- name: GIO Documentation + description: Reference documentation for the GIO API.. + link: https://docs.gtk.org/gio/ +""" EXAMPLES = """ +--- - name: Set chrome as the default handler for https community.general.gio_mime: mime_type: x-scheme-handler/https @@ -50,26 +55,27 @@ EXAMPLES = """ register: result """ -RETURN = ''' - handler: - description: - - The handler set as default. - returned: success - type: str - sample: google-chrome.desktop - stdout: - description: - - The output of the C(gio) command. - returned: success - type: str - sample: Set google-chrome.desktop as the default for x-scheme-handler/https - stderr: - description: - - The error output of the C(gio) command. - returned: failure - type: str - sample: 'gio: Failed to load info for handler "never-existed.desktop"' -''' +RETURN = """ +--- +handler: + description: + - The handler set as default. + returned: success + type: str + sample: google-chrome.desktop +stdout: + description: + - The output of the C(gio) command. + returned: success + type: str + sample: Set google-chrome.desktop as the default for x-scheme-handler/https +stderr: + description: + - The error output of the C(gio) command. 
+ returned: failure + type: str + sample: 'gio: Failed to load info for handler "never-existed.desktop"' +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index 1280f04d59..d1f49ca82e 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -10,15 +10,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- author: Kairo Araujo (@kairoaraujo) module: mksysb short_description: Generates AIX mksysb rootvg backups description: - - This module manages a basic AIX mksysb (image) of rootvg. +- This module manages a basic AIX mksysb (image) of rootvg. +seealso: +- name: C(mksysb) command manual page + description: Manual page for the command. + link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command + extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: full @@ -27,72 +32,73 @@ attributes: options: backup_crypt_files: description: - - Backup encrypted files. + - Backup encrypted files. type: bool default: true backup_dmapi_fs: description: - - Back up DMAPI filesystem files. + - Back up DMAPI filesystem files. type: bool default: true create_map_files: description: - - Creates a new MAP files. + - Creates a new MAP files. type: bool default: false exclude_files: description: - - Excludes files using C(/etc/rootvg.exclude). + - Excludes files using C(/etc/rootvg.exclude). type: bool default: false exclude_wpar_files: description: - - Excludes WPAR files. + - Excludes WPAR files. type: bool default: false extended_attrs: description: - - Backup extended attributes. + - Backup extended attributes. 
type: bool default: true name: type: str description: - - Backup name + - Backup name required: true new_image_data: description: - - Creates a new file data. + - Creates a new file data. type: bool default: true software_packing: description: - - Exclude files from packing option listed in - C(/etc/exclude_packing.rootvg). + - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). type: bool default: false storage_path: type: str description: - - Storage path where the mksysb will stored. + - Storage path where the mksysb will stored. required: true use_snapshot: description: - - Creates backup using snapshots. + - Creates backup using snapshots. type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: Running a backup image mksysb community.general.mksysb: name: myserver storage_path: /repository/images exclude_files: true exclude_wpar_files: true -''' +""" -RETURN = ''' +RETURN = """ +--- changed: description: Return changed for mksysb actions as true or false. returned: always @@ -101,7 +107,7 @@ msg: description: Return message regarding the action. returned: always type: str -''' +""" import os diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index f9ad13980d..c317ae8da8 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -9,150 +9,150 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: pipx short_description: Manages applications installed with pipx version_added: 3.8.0 description: - - Manage Python applications installed in isolated virtualenvs using pipx. +- Manage Python applications installed in isolated virtualenvs using pipx. 
extends_documentation_fragment: - - community.general.attributes - - community.general.pipx +- community.general.attributes +- community.general.pipx attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - state: - type: str - choices: - - present - - absent - - install - - install_all - - uninstall - - uninstall_all - - inject - - uninject - - upgrade - - upgrade_shared - - upgrade_all - - reinstall - - reinstall_all - - latest - - pin - - unpin - default: install - description: - - Desired state for the application. - - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. - - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). - It was added in community.general 5.5.0. - - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), - make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0. - name: - type: str - description: - - The name of the application. In C(pipx) documentation it is also referred to as - the name of the virtual environment where the application will be installed. - - If O(name) is a simple package name without version specifiers, - then that name is used as the Python package name to be installed. - - Use O(source) for passing package specifications or installing from URLs or directories. - source: - type: str - description: - - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. - - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. - - The value of this option is passed as-is to C(pipx). 
- - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. - install_apps: - description: - - Add apps from the injected packages. - - Only used when O(state=inject). - type: bool - default: false - version_added: 6.5.0 - install_deps: - description: - - Include applications of dependent packages. - - Only used when O(state=install), O(state=latest), or O(state=inject). - type: bool - default: false - inject_packages: - description: - - Packages to be injected into an existing virtual environment. - - Only used when O(state=inject). - type: list - elements: str - force: - description: - - Force modification of the application's virtual environment. See C(pipx) for details. - - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). - type: bool - default: false - include_injected: - description: - - Upgrade the injected packages along with the application. - - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). - - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. - type: bool - default: false - index_url: - description: - - Base URL of Python Package Index. - - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). - type: str - python: - description: - - Python version to be used when creating the application virtual environment. Must be 3.6+. - - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). - type: str - system_site_packages: - description: - - Give application virtual environment access to the system site-packages directory. - - Only used when O(state=install) or O(state=latest). - type: bool - default: false - version_added: 6.6.0 - editable: - description: - - Install the project in editable mode. 
- type: bool - default: false - version_added: 4.6.0 - pip_args: - description: - - Arbitrary arguments to pass directly to C(pip). - type: str - version_added: 4.6.0 - suffix: - description: - - Optional suffix for virtual environment and executable names. - - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change." - type: str - version_added: 9.3.0 - global: - version_added: 9.4.0 - spec_metadata: - description: - - Spec metadata file for O(state=install_all). - - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) - with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). - type: path - version_added: 9.4.0 + state: + type: str + choices: + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin + default: install + description: + - Desired state for the application. + - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general + 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have a + compatible version when using this option. These states have been added in community.general 9.4.0. + name: + type: str + description: + - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment where the application + will be installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed. 
+ - Use O(source) for passing package specifications or installing from URLs or directories. + source: + type: str + description: + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. + install_apps: + description: + - Add apps from the injected packages. + - Only used when O(state=inject). + type: bool + default: false + version_added: 6.5.0 + install_deps: + description: + - Include applications of dependent packages. + - Only used when O(state=install), O(state=latest), or O(state=inject). + type: bool + default: false + inject_packages: + description: + - Packages to be injected into an existing virtual environment. + - Only used when O(state=inject). + type: list + elements: str + force: + description: + - Force modification of the application's virtual environment. See C(pipx) for details. + - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + type: bool + default: false + include_injected: + description: + - Upgrade the injected packages along with the application. + - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). + - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. + type: bool + default: false + index_url: + description: + - Base URL of Python Package Index. + - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). + type: str + python: + description: + - Python version to be used when creating the application virtual environment. Must be 3.6+. 
+ - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). + type: str + system_site_packages: + description: + - Give application virtual environment access to the system site-packages directory. + - Only used when O(state=install) or O(state=latest). + type: bool + default: false + version_added: 6.6.0 + editable: + description: + - Install the project in editable mode. + type: bool + default: false + version_added: 4.6.0 + pip_args: + description: + - Arbitrary arguments to pass directly to C(pip). + type: str + version_added: 4.6.0 + suffix: + description: + - Optional suffix for virtual environment and executable names. + - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change." + type: str + version_added: 9.3.0 + global: + version_added: 9.4.0 + spec_metadata: + description: + - Spec metadata file for O(state=install_all). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with + O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + type: path + version_added: 9.4.0 notes: - - > - This first implementation does not verify whether a specified version constraint has been installed or not. - Hence, when using version operators, C(pipx) module will always try to execute the operation, - even when the application was previously installed. - This feature will be added in the future. +- > + This first implementation does not verify whether a specified version constraint has been installed or not. + Hence, when using version operators, C(pipx) module will always try to execute the operation, + even when the application was previously installed. + This feature will be added in the future. 
author: - - "Alexei Znamensky (@russoz)" -''' +- "Alexei Znamensky (@russoz)" +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: Install tox community.general.pipx: name: tox @@ -181,14 +181,14 @@ EXAMPLES = ''' - name: Install multiple packages from list vars: pipx_packages: - - pycowsay - - black - - tox + - pycowsay + - black + - tox community.general.pipx: name: "{{ item }}" state: latest with_items: "{{ pipx_packages }}" -''' +""" import json diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 0e0cc0fe14..65c0ba552e 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -9,45 +9,46 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: pipx_info short_description: Rretrieves information about applications installed with pipx version_added: 5.6.0 description: - - Retrieve details about Python applications installed in isolated virtualenvs using pipx. +- Retrieve details about Python applications installed in isolated virtualenvs using pipx. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module - - community.general.pipx +- community.general.attributes +- community.general.attributes.info_module +- community.general.pipx options: - name: - description: - - Name of an application installed with C(pipx). - type: str - include_deps: - description: - - Include dependent packages in the output. - type: bool - default: false - include_injected: - description: - - Include injected packages in the output. - type: bool - default: false - include_raw: - description: - - Returns the raw output of C(pipx list --json). - - The raw output is not affected by O(include_deps) or O(include_injected). - type: bool - default: false - global: - version_added: 9.3.0 + name: + description: + - Name of an application installed with C(pipx). 
+ type: str + include_deps: + description: + - Include dependent packages in the output. + type: bool + default: false + include_injected: + description: + - Include injected packages in the output. + type: bool + default: false + include_raw: + description: + - Returns the raw output of C(pipx list --json). + - The raw output is not affected by O(include_deps) or O(include_injected). + type: bool + default: false + global: + version_added: 9.3.0 author: - - "Alexei Znamensky (@russoz)" -''' +- "Alexei Znamensky (@russoz)" +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: retrieve all installed applications community.general.pipx_info: {} @@ -65,9 +66,10 @@ EXAMPLES = ''' community.general.pipx_info: name: ansible-lint include_deps: true -''' +""" -RETURN = ''' +RETURN = """ +--- application: description: The list of installed applications returned: success @@ -107,15 +109,8 @@ cmd: returned: success type: list elements: str - sample: [ - "/usr/bin/python3.10", - "-m", - "pipx", - "list", - "--include-injected", - "--json" - ] -''' + sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"] +""" import json diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 2e1e67ff32..8bb0abc273 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -16,8 +16,7 @@ author: - "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: -- This module allows for the manipulation of Xfce 4 Configuration with the help of xfconf-query. Please see the xfconf-query(1) man page for more - details. +- This module allows for the manipulation of Xfce 4 Configuration with the help of C(xfconf-query). seealso: - name: xfconf-query(1) man page description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. 
From 1d86d49688ea0f0adedc2eed30e70dbd21e4f80d Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Mon, 7 Oct 2024 23:12:06 +0300 Subject: [PATCH 273/482] ipa_getkeytab: Create module (#8938) * Add ipa_getkeytab * Parameters fix * PR fixes * PR fixes 2 * Fix unit tests * Fix doc and unit tests * Fix doc * Fix doc 2 * Fix doc 3 * PR fixes * PR fixes 2 * Fix name * Fix description typo * Fix variable names * Update tests * Add man reference --- .github/BOTMETA.yml | 2 + plugins/modules/ipa_getkeytab.py | 247 ++++++++++++++++++ .../plugins/modules/test_ipa_getkeytab.py | 60 +++++ 3 files changed, 309 insertions(+) create mode 100644 plugins/modules/ipa_getkeytab.py create mode 100644 tests/unit/plugins/modules/test_ipa_getkeytab.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 10f2aee95b..be0bf6da30 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -716,6 +716,8 @@ files: $modules/ipa_: maintainers: $team_ipa ignore: fxfitz + $modules/ipa_getkeytab.py: + maintainers: abakanovskii $modules/ipa_dnsrecord.py: maintainers: $team_ipa jwbernin $modules/ipbase_info.py: diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py new file mode 100644 index 0000000000..3d4f81d5b1 --- /dev/null +++ b/plugins/modules/ipa_getkeytab.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ipa_getkeytab +short_description: Manage keytab file in FreeIPA +version_added: 9.5.0 +description: + - Manage keytab file with C(ipa-getkeytab) utility. + - See U(https://manpages.ubuntu.com/manpages/jammy/man1/ipa-getkeytab.1.html) for reference. 
+author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - The base path where to put generated keytab file. + type: path + aliases: ["keytab"] + required: true + principal: + description: + - The non-realm part of the full principal name. + type: str + required: true + ipa_host: + description: + - The IPA server to retrieve the keytab from (FQDN). + type: str + ldap_uri: + description: + - LDAP URI. If V(ldap://) is specified, STARTTLS is initiated by default. + - Can not be used with the O(ipa_host) option. + type: str + bind_dn: + description: + - The LDAP DN to bind as when retrieving a keytab without Kerberos credentials. + - Generally used with the O(bind_pw) option. + type: str + bind_pw: + description: + - The LDAP password to use when not binding with Kerberos. + type: str + password: + description: + - Use this password for the key instead of one randomly generated. + type: str + ca_cert: + description: + - The path to the IPA CA certificate used to validate LDAPS/STARTTLS connections. + type: path + sasl_mech: + description: + - SASL mechanism to use if O(bind_dn) and O(bind_pw) are not specified. + choices: ["GSSAPI", "EXTERNAL"] + type: str + retrieve_mode: + description: + - Retrieve an existing key from the server instead of generating a new one. + - This is incompatible with the O(password), and will work only against a IPA server more recent than version 3.3. + - The user requesting the keytab must have access to the keys for this operation to succeed. + - Be aware that if set V(true), a new keytab will be generated. + - This invalidates all previously retrieved keytabs for this service principal. + type: bool + encryption_types: + description: + - The list of encryption types to use to generate keys. + - It will use local client defaults if not provided. + - Valid values depend on the Kerberos library version and configuration. 
+ type: str + state: + description: + - The state of the keytab file. + - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set O(force=true). + type: str + default: present + choices: ["present", "absent"] + force: + description: + - Force recreation if exists already. + type: bool +requirements: + - freeipa-client + - Managed host is FreeIPA client +extends_documentation_fragment: + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Get kerberos ticket + ansible.builtin.shell: kinit admin + args: + stdin: "{{ aldpro_admin_password }}" + changed_when: true + +- name: Create keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + +- name: Retrieve already existing keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + retrieve_mode: true + +- name: Force keytab recreation + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + force: true +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.path = kwargs['path'] + self.state = kwargs['state'] + self.principal = kwargs['principal'] + self.ipa_host = kwargs['ipa_host'] + self.ldap_uri = kwargs['ldap_uri'] + self.bind_dn = kwargs['bind_dn'] + self.bind_pw = kwargs['bind_pw'] + self.password = kwargs['password'] + self.ca_cert = kwargs['ca_cert'] + self.sasl_mech = kwargs['sasl_mech'] + self.retrieve_mode = kwargs['retrieve_mode'] + self.encryption_types = kwargs['encryption_types'] + + self.runner = CmdRunner( + module, + command='ipa-getkeytab', + 
arg_formats=dict( + retrieve_mode=cmd_runner_fmt.as_bool('--retrieve'), + path=cmd_runner_fmt.as_opt_val('--keytab'), + ipa_host=cmd_runner_fmt.as_opt_val('--server'), + principal=cmd_runner_fmt.as_opt_val('--principal'), + ldap_uri=cmd_runner_fmt.as_opt_val('--ldapuri'), + bind_dn=cmd_runner_fmt.as_opt_val('--binddn'), + bind_pw=cmd_runner_fmt.as_opt_val('--bindpw'), + password=cmd_runner_fmt.as_opt_val('--password'), + ca_cert=cmd_runner_fmt.as_opt_val('--cacert'), + sasl_mech=cmd_runner_fmt.as_opt_val('--mech'), + encryption_types=cmd_runner_fmt.as_opt_val('--enctypes'), + ) + ) + + def _exec(self, check_rc=True): + with self.runner( + "retrieve_mode path ipa_host principal ldap_uri bind_dn bind_pw password ca_cert sasl_mech encryption_types", + check_rc=check_rc + ) as ctx: + rc, out, err = ctx.run() + return out + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["keytab"]), + state=dict(default='present', choices=['present', 'absent']), + principal=dict(type='str', required=True), + ipa_host=dict(type='str'), + ldap_uri=dict(type='str'), + bind_dn=dict(type='str'), + bind_pw=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + sasl_mech=dict(type='str', choices=["GSSAPI", "EXTERNAL"]), + retrieve_mode=dict(type='bool'), + encryption_types=dict(type='str'), + force=dict(type='bool'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + mutually_exclusive=[('ipa_host', 'ldap_uri'), ('retrieve_mode', 'password')], + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + force = module.params['force'] + + keytab = IPAKeytab(module, + path=path, + state=state, + principal=module.params['principal'], + ipa_host=module.params['ipa_host'], + ldap_uri=module.params['ldap_uri'], + bind_dn=module.params['bind_dn'], + bind_pw=module.params['bind_pw'], + password=module.params['password'], + ca_cert=module.params['ca_cert'], + 
sasl_mech=module.params['sasl_mech'], + retrieve_mode=module.params['retrieve_mode'], + encryption_types=module.params['encryption_types'], + ) + + changed = False + if state == 'present': + if os.path.exists(path): + if force and not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + keytab._exec() + changed = True + if force and module.check_mode: + changed = True + else: + changed = True + keytab._exec() + + if state == 'absent': + if os.path.exists(path): + changed = True + if not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_ipa_getkeytab.py b/tests/unit/plugins/modules/test_ipa_getkeytab.py new file mode 100644 index 0000000000..e4e8ed2ece --- /dev/null +++ b/tests/unit/plugins/modules/test_ipa_getkeytab.py @@ -0,0 +1,60 @@ +# +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.plugins.modules import ipa_getkeytab +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + + +class IPAKeytabModuleTestCase(ModuleTestCase): + module = ipa_getkeytab + + def setUp(self): + super(IPAKeytabModuleTestCase, self).setUp() + ansible_module_path = "ansible_collections.community.general.plugins.modules.ipa_getkeytab.AnsibleModule" + self.mock_run_command = patch('%s.run_command' % ansible_module_path) + 
self.module_main_command = self.mock_run_command.start() + self.mock_get_bin_path = patch('%s.get_bin_path' % ansible_module_path) + self.get_bin_path = self.mock_get_bin_path.start() + self.get_bin_path.return_value = '/testbin/ipa_getkeytab' + + def tearDown(self): + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + super(IPAKeytabModuleTestCase, self).tearDown() + + def module_main(self, exit_exc): + with self.assertRaises(exit_exc) as exc: + self.module.main() + return exc.exception.args[0] + + def test_present(self): + set_module_args({ + 'path': '/tmp/test.keytab', + 'principal': 'HTTP/freeipa-dc02.ipa.test', + 'ipa_host': 'freeipa-dc01.ipa.test', + 'state': 'present' + }) + + self.module_main_command.side_effect = [ + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/ipa_getkeytab', + '--keytab', '/tmp/test.keytab', + '--server', 'freeipa-dc01.ipa.test', + '--principal', 'HTTP/freeipa-dc02.ipa.test' + ], + check_rc=True, + environ_update={'LC_ALL': 'C', 'LANGUAGE': 'C'} + ), + ]) From 24f2b980b79cedd199959a29cbfd022cd5791076 Mon Sep 17 00:00:00 2001 From: Manuel Luzarreta Date: Mon, 7 Oct 2024 22:12:43 +0200 Subject: [PATCH 274/482] passwordstore: Support subkey creation and update (#8952) --- ...ord-store-lookup-create-subkey-support.yml | 2 + plugins/lookup/passwordstore.py | 85 ++++++++++++++++--- 2 files changed, 73 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml diff --git a/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml b/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml new file mode 100644 index 0000000000..73bf1710e7 --- /dev/null +++ b/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - passwordstore lookup plugin - add subkey 
creation/update support (https://github.com/ansible-collections/community.general/pull/8952). \ No newline at end of file diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 510bdbec3d..f35d268995 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -14,7 +14,7 @@ DOCUMENTATION = ''' short_description: manage passwords with passwordstore.org's pass utility description: - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It also retrieves YAML style keys stored as multilines in the passwordfile. + It can also retrieve, create or update YAML style keys stored as multilines in the passwordfile. - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead. options: @@ -33,11 +33,11 @@ DOCUMENTATION = ''' env: - name: PASSWORD_STORE_DIR create: - description: Create the password if it does not already exist. Takes precedence over O(missing). + description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). type: bool default: false overwrite: - description: Overwrite the password if it does already exist. + description: Overwrite the password or the subkey if it does already exist. type: bool default: false umask: @@ -53,7 +53,9 @@ DOCUMENTATION = ''' type: bool default: false subkey: - description: Return a specific subkey of the password. When set to V(password), always returns the first line. + description: + - By default return a specific subkey of the password. When set to V(password), always returns the first line. + - With O(overwrite=true), it will create the subkey and return it. type: str default: password userpass: @@ -64,7 +66,7 @@ DOCUMENTATION = ''' type: integer default: 16 backup: - description: Used with O(overwrite=true). Backup the previous password in a subkey. 
+ description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. type: bool default: false nosymbols: @@ -189,6 +191,17 @@ tasks.yml: | vars: mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}" + - name: >- + Create a random 16 character password in a subkey. If the password file already exists, just add the subkey in it. + If the subkey exists, returns it + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='foo') }}" + + - name: >- + Create a random 16 character password in a subkey. Overwrite if it already exists and backup the old one. + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='user', overwrite=true, backup=true) }}" + - name: Prints 'abc' if example/test does not exist, just give the password otherwise ansible.builtin.debug: var: mypassword @@ -411,15 +424,48 @@ class LookupModule(LookupBase): def update_password(self): # generate new password, insert old lines from current result and return new password + # if the target is a subkey, only modify the subkey newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass - if self.paramvals['preserve'] or self.paramvals['timestamp']: - msg += '\n' - if self.paramvals['preserve'] and self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['timestamp'] and self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + + msg_lines = [] + subkey_exists = False + subkey_line = "{0}: {1}".format(subkey, newpass) + oldpass = None + + for line in self.passoutput: + if line.startswith("{0}: ".format(subkey)): + oldpass = self.passdict[subkey] + line = subkey_line + subkey_exists = True + + msg_lines.append(line) + + if not 
subkey_exists: + msg_lines.insert(2, subkey_line) + + if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: + msg_lines.append( + "lookup_pass: old subkey '{0}' password was {1} (Updated on {2})\n".format( + subkey, oldpass, datetime + ) + ) + + msg = os.linesep.join(msg_lines) + + else: + msg = newpass + + if self.paramvals['preserve'] or self.paramvals['timestamp']: + msg += '\n' + if self.paramvals['preserve'] and self.passoutput[1:]: + msg += '\n'.join(self.passoutput[1:]) + '\n' + if self.paramvals['timestamp'] and self.paramvals['backup']: + msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: @@ -431,13 +477,21 @@ class LookupModule(LookupBase): # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + subkey = self.paramvals["subkey"] + + if subkey != "password": + msg = "\n\n{0}: {1}".format(subkey, newpass) + else: + msg = newpass + if self.paramvals['timestamp']: msg += '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: raise AnsibleError('exit code {0} while running {1}. 
Error output: {2}'.format(e.returncode, e.cmd, e.output)) + return newpass def get_passresult(self): @@ -525,7 +579,10 @@ class LookupModule(LookupBase): self.parse_params(term) # parse the input into paramvals with self.opt_lock('readwrite'): if self.check_pass(): # password exists - if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': + if self.paramvals['overwrite']: + with self.opt_lock('write'): + result.append(self.update_password()) + elif self.paramvals["subkey"] != "password" and not self.passdict.get(self.paramvals['subkey']): # password exists but not the subkey with self.opt_lock('write'): result.append(self.update_password()) else: From c7e2875a4d8a92e81ff09d037a206e2eae8cfdae Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 7 Oct 2024 22:13:14 +0200 Subject: [PATCH 275/482] keycloak_user_federation: add user federation config parameter `referral` to module args (#8954) * add keycloak referral parameter to module args * add changelog fragment * Update plugins/modules/keycloak_user_federation.py Co-authored-by: Felix Fontein * Update changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...ycloak-user-federation-add-referral-parameter.yml | 2 ++ plugins/modules/keycloak_user_federation.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml diff --git a/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml b/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml new file mode 100644 index 0000000000..cd8347faf0 --- /dev/null +++ b/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments 
(https://github.com/ansible-collections/community.general/pull/8954). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 0b3b610806..160d67edb4 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -442,6 +442,17 @@ options: - Max lifespan of cache entry in milliseconds. type: int + referral: + description: + - Specifies if LDAP referrals should be followed or ignored. Please note that enabling + referrals can slow down authentication as it allows the LDAP server to decide which other + LDAP servers to use. This could potentially include untrusted servers. + type: str + choices: + - ignore + - follow + version_added: 9.5.0 + mappers: description: - A list of dicts defining mappers associated with this Identity Provider. @@ -788,6 +799,7 @@ def main(): priority=dict(type='int', default=0), rdnLDAPAttribute=dict(type='str'), readTimeout=dict(type='int'), + referral=dict(type='str', choices=['ignore', 'follow']), searchScope=dict(type='str', choices=['1', '2'], default='1'), serverPrincipal=dict(type='str'), krbPrincipalAttribute=dict(type='str'), From cc8009621f5a3003d38f321e01000b548d698888 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Mon, 7 Oct 2024 23:13:51 +0300 Subject: [PATCH 276/482] ipa_host: Fix enabled and disabled states (#8920) * Fix ipa_host * PR Fixes * PR Fixes * PR Doc fixes * PR Doc fixes 2 * Fix default value --- .../fragments/8920-ipa-host-fix-state.yml | 2 ++ plugins/modules/ipa_host.py | 28 +++++++++++++------ 2 files changed, 22 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/8920-ipa-host-fix-state.yml diff --git a/changelogs/fragments/8920-ipa-host-fix-state.yml b/changelogs/fragments/8920-ipa-host-fix-state.yml new file mode 100644 index 0000000000..0f3df64b6a --- /dev/null +++ b/changelogs/fragments/8920-ipa-host-fix-state.yml @@ 
-0,0 +1,2 @@ +bugfixes: + - ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py index b37a606d75..791cee91f3 100644 --- a/plugins/modules/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -74,10 +74,17 @@ options: type: list elements: str state: - description: State to ensure. + description: + - State to ensure. default: present choices: ["absent", "disabled", "enabled", "present"] type: str + force_creation: + description: + - Create host if O(state=disabled) or O(state=enabled) but not present. + default: true + type: bool + version_added: 9.5.0 update_dns: description: - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. @@ -233,26 +240,31 @@ def get_host_diff(client, ipa_host, module_host): def ensure(module, client): name = module.params['fqdn'] state = module.params['state'] + force_creation = module.params['force_creation'] ipa_host = client.host_find(name=name) module_host = get_host_dict(description=module.params['description'], - force=module.params['force'], ip_address=module.params['ip_address'], + force=module.params['force'], + ip_address=module.params['ip_address'], ns_host_location=module.params['ns_host_location'], ns_hardware_platform=module.params['ns_hardware_platform'], ns_os_version=module.params['ns_os_version'], user_certificate=module.params['user_certificate'], mac_address=module.params['mac_address'], - random_password=module.params.get('random_password'), + random_password=module.params['random_password'], ) changed = False if state in ['present', 'enabled', 'disabled']: - if not ipa_host: + if not ipa_host and (force_creation or state == 'present'): changed = True if not module.check_mode: # OTP password generated by FreeIPA is visible only for host_add command # 
so, return directly from here. return changed, client.host_add(name=name, host=module_host) else: + if state in ['disabled', 'enabled']: + module.fail_json(msg="No host with name " + ipa_host + " found") + diff = get_host_diff(client, ipa_host, module_host) if len(diff) > 0: changed = True @@ -261,11 +273,10 @@ def ensure(module, client): for key in diff: data[key] = module_host.get(key) ipa_host_show = client.host_show(name=name) - if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'): + if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): client.host_disable(name=name) return changed, client.host_mod(name=name, host=data) - - else: + elif state == 'absent': if ipa_host: changed = True update_dns = module.params.get('update_dns', False) @@ -288,7 +299,8 @@ def main(): mac_address=dict(type='list', aliases=['macaddress'], elements='str'), update_dns=dict(type='bool'), state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - random_password=dict(type='bool', no_log=False),) + random_password=dict(type='bool', no_log=False), + force_creation=dict(type='bool', default=True),) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) From 3b109abe18243aa21e2f5efa82fb005496a8235f Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 7 Oct 2024 22:14:22 +0200 Subject: [PATCH 277/482] keycloak_user_federation: add module argument that allows excluding `bindCredential` from update check (#8898) * add module argument that allows excluding `bindCredential` from update check * add changelog fragment * change option name to `bind_credential_update_mode` and change type to str --- ...ude-bind-credential-from-change-check.yaml | 2 ++ plugins/modules/keycloak_user_federation.py | 33 +++++++++++++++++-- 2 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 
changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml diff --git a/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml b/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml new file mode 100644 index 0000000000..8f86d510f9 --- /dev/null +++ b/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). \ No newline at end of file diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 160d67edb4..215aa7f4ca 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -93,6 +93,24 @@ options: default: true version_added: 9.4.0 + bind_credential_update_mode: + description: + - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. + Comparing the redacted value with the desired value always evaluates to not equal. This means + the before and desired states are never equal if the parameter is set. + - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. + Because of the redacted value returned by Keycloak the module will always detect a change + and make an update if a O(config.bindCredential) value is set. + - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the + desired state. The value of O(config.bindCredential) will only be updated if there are other changes + to the user federation that require an update. 
+ type: str + default: always + choices: + - always + - only_indirect + version_added: 9.5.0 + config: description: - Dict specifying the configuration options for the provider; the contents differ depending on @@ -837,6 +855,7 @@ def main(): provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), parent_id=dict(type='str', aliases=['parentId']), remove_unspecified_mappers=dict(type='bool', default=True), + bind_credential_update_mode=dict(type='str', default='always', choices=['always', 'only_indirect']), mappers=dict(type='list', elements='dict', options=mapper_spec), ) @@ -884,8 +903,9 @@ def main(): # Filter and map the parameters names that apply comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers', 'remove_unspecified_mappers'] and - module.params.get(x) is not None] + if x not in list(keycloak_argument_spec().keys()) + + ['state', 'realm', 'mappers', 'remove_unspecified_mappers', 'bind_credential_update_mode'] + and module.params.get(x) is not None] # See if it already exists in Keycloak if cid is None: @@ -1027,8 +1047,15 @@ def main(): if state == 'present': # Process an update + desired_copy = deepcopy(desired_comp) + before_copy = deepcopy(before_comp) + # exclude bindCredential when checking wether an update is required, therefore + # updating it only if there are other changes + if module.params['bind_credential_update_mode'] == 'only_indirect': + desired_copy.get('config', []).pop('bindCredential', None) + before_copy.get('config', []).pop('bindCredential', None) # no changes - if desired_comp == before_comp: + if desired_copy == before_copy: result['changed'] = False result['end_state'] = sanitize(desired_comp) result['msg'] = "No changes required to user federation {id}.".format(id=cid) From 1bdf8fc02545acd73faf988088ea0f187f3f909f Mon Sep 17 00:00:00 2001 From: salty Date: Mon, 7 Oct 2024 22:14:52 +0200 Subject: [PATCH 
278/482] cloudflare_dns: Update SRV record handling for Cloudflare API changes (#8948) --- changelogs/fragments/8679-fix-cloudflare-srv.yml | 2 ++ plugins/modules/cloudflare_dns.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/8679-fix-cloudflare-srv.yml diff --git a/changelogs/fragments/8679-fix-cloudflare-srv.yml b/changelogs/fragments/8679-fix-cloudflare-srv.yml new file mode 100644 index 0000000000..bf00fc1305 --- /dev/null +++ b/changelogs/fragments/8679-fix-cloudflare-srv.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948). diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index 1904976440..86550966be 100644 --- a/plugins/modules/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -716,12 +716,14 @@ class CloudflareAPI(object): "port": params['port'], "weight": params['weight'], "priority": params['priority'], - "name": params['record'], - "proto": params['proto'], - "service": params['service'] } - new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} + new_record = { + "type": params['type'], + "name": params['service'] + '.' + params['proto'] + '.' + params['record'], + "ttl": params['ttl'], + 'data': srv_data, + } search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] From c814fd0530f00138f385ff25d18782729a7d5469 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 7 Oct 2024 22:15:45 +0200 Subject: [PATCH 279/482] keycloak_userprofile: improve diff by deserializing fetched `kc.user.profile.config` and serializing it before sending (#8940) * deserialize fetched `kc.user.profile.config` and serialize it before sending * change `kc.user.profile.config` to JSON formatted string in mock `get_component` responses * add changelog fragment --- .../8940-keycloak_userprofile-improve-diff.yml | 2 ++ plugins/modules/keycloak_userprofile.py | 12 +++++++++--- .../plugins/modules/test_keycloak_userprofile.py | 10 ++++++---- 3 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml diff --git a/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml b/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml new file mode 100644 index 0000000000..93f57cd86a --- /dev/null +++ b/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py index 55971cbf42..57e1c42e96 100644 --- a/plugins/modules/keycloak_userprofile.py +++ b/plugins/modules/keycloak_userprofile.py @@ -612,9 +612,7 @@ def main(): attribute['validations']['person-name-prohibited-characters'] = ( attribute['validations'].pop('personNameProhibitedCharacters') ) - # special JSON parsing for kc_user_profile_config - value = json.dumps(kc_user_profile_config[0]) - changeset[camel(component_param)][config_param].append(value) + changeset[camel(component_param)][config_param].append(kc_user_profile_config[0]) # usual camelCase parameters else: changeset[camel(component_param)][camel(config_param)] = [] @@ -662,6 +660,10 @@ def main(): changeset['id'] = userprofile_id changeset_copy['id'] = userprofile_id + # keycloak returns kc.user.profile.config as a single JSON formatted string, so we have to deserialize it + if 'config' in userprofile and 'kc.user.profile.config' in userprofile['config']: + userprofile['config']['kc.user.profile.config'][0] = json.loads(userprofile['config']['kc.user.profile.config'][0]) + # Compare top-level parameters for param, value in changeset.items(): before_realm_userprofile[param] = userprofile[param] @@ -680,6 +682,10 @@ def main(): # Check all the possible states of the resource and do what is needed to # converge current state with desired state (create, update or delete # the userprofile). 
+ + # keycloak expects kc.user.profile.config as a single JSON formatted string, so we have to serialize it + if 'config' in changeset and 'kc.user.profile.config' in changeset['config']: + changeset['config']['kc.user.profile.config'][0] = json.dumps(changeset['config']['kc.user.profile.config'][0]) if userprofile_id and state == 'present': if result['changed']: if module._diff: diff --git a/tests/unit/plugins/modules/test_keycloak_userprofile.py b/tests/unit/plugins/modules/test_keycloak_userprofile.py index 3001201efa..3ae01bbb8b 100644 --- a/tests/unit/plugins/modules/test_keycloak_userprofile.py +++ b/tests/unit/plugins/modules/test_keycloak_userprofile.py @@ -17,6 +17,8 @@ from ansible_collections.community.general.plugins.modules import keycloak_userp from itertools import count +from json import dumps + from ansible.module_utils.six import StringIO @@ -509,7 +511,7 @@ class TestKeycloakUserprofile(ModuleTestCase): "providerType": "org.keycloak.userprofile.UserProfileProvider", "config": { "kc.user.profile.config": [ - { + dumps({ "attributes": [ { "name": "username", @@ -625,7 +627,7 @@ class TestKeycloakUserprofile(ModuleTestCase): "displayDescription": "Attributes, which refer to user metadata", } ], - } + }) ] } } @@ -714,7 +716,7 @@ class TestKeycloakUserprofile(ModuleTestCase): "providerType": "org.keycloak.userprofile.UserProfileProvider", "config": { "kc.user.profile.config": [ - { + dumps({ "attributes": [ { "name": "username", @@ -830,7 +832,7 @@ class TestKeycloakUserprofile(ModuleTestCase): "displayDescription": "Attributes, which refer to user metadata", } ], - } + }) ] } } From 24b74cc4b9976dd0e3cacf94316e4d131c409fbf Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 7 Oct 2024 21:16:29 +0100 Subject: [PATCH 280/482] opennebula inventory: add VM ID and VM host to data (#8532) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add VM id and VM host to opennebula inventory data ##### SUMMARY To enable 
greater use of the inventory, add the ID of the VM, and the hostname of the host the VM is running on to the inventory output ##### ISSUE TYPE - Feature Pull Request ##### COMPONENT NAME opennebula.py ##### ADDITIONAL INFORMATION ```paste below "host": "foo23.host", "id": 1234, ``` * Create 8532-expand-opennuebula-inventory-data.yml * Update opennebula.py * Update changelogs/fragments/8532-expand-opennuebula-inventory-data.yml Co-authored-by: Felix Fontein * Add check for empty records and add test * fix attribute test * fix attribute test * fix attribute test * fix attribute test * Update plugins/inventory/opennebula.py Co-authored-by: Felix Fontein * update as per guidance * restore attribute checks * fix attr * fix indent * PR Fixes * add attribute check in case of empty variable --------- Co-authored-by: Felix Fontein Co-authored-by: Александр Бакановский --- ...8532-expand-opennuebula-inventory-data.yml | 2 ++ plugins/inventory/opennebula.py | 3 +++ .../unit/plugins/inventory/test_opennebula.py | 23 ++++++++++++++++--- 3 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/8532-expand-opennuebula-inventory-data.yml diff --git a/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml b/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml new file mode 100644 index 0000000000..a1b0ffe2c0 --- /dev/null +++ b/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml @@ -0,0 +1,2 @@ +minor_changes: + - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532). 
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index bf81758ef1..077d3da5a3 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -199,6 +199,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): continue server['name'] = vm.NAME + server['id'] = vm.ID + if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: + server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME server['LABELS'] = labels server['v4_first_ip'] = self._get_vm_ipv4(vm) server['v6_first_ip'] = self._get_vm_ipv6(vm) diff --git a/tests/unit/plugins/inventory/test_opennebula.py b/tests/unit/plugins/inventory/test_opennebula.py index bbc2fe699a..52ea934043 100644 --- a/tests/unit/plugins/inventory/test_opennebula.py +++ b/tests/unit/plugins/inventory/test_opennebula.py @@ -21,6 +21,23 @@ from ansible_collections.community.general.plugins.inventory.opennebula import I from ansible_collections.community.general.tests.unit.compat.mock import create_autospec +class HistoryEntry(object): + def __init__(self): + self.SEQ = '384' + self.HOSTNAME = 'sam-691-sam' + self.HID = '10' + self.CID = '0' + self.DS_ID = '100' + self.VM_MAD = 'kvm' + self.TM_MAD = '3par' + self.ACTION = '0' + + +class HistoryRecords(object): + def __init__(self): + self.HISTORY = [HistoryEntry()] + + @pytest.fixture def inventory(): r = InventoryModule() @@ -58,7 +75,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 132, 'GNAME': 'CSApparelVDC', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': HistoryRecords(), 'ID': 7157, 'LAST_POLL': 1632762935, 'LCM_STATE': 3, @@ -104,7 +121,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 0, 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': [], 'ID': 327, 'LAST_POLL': 1632763543, 'LCM_STATE': 3, @@ -167,7 +184,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 0, 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': [], 'ID': 107, 'LAST_POLL': 1632764186, 'LCM_STATE': 3, From 
5e6b8e53274095c7a62b8b4dadf2a9c18ac2e562 Mon Sep 17 00:00:00 2001 From: JaegerMaKn Date: Mon, 7 Oct 2024 22:26:19 +0200 Subject: [PATCH 281/482] dig lookup: Allow to pass port for DNS lookup (#8966) dnspython accepts a port as part of the nameserver. Currently, the nameservers are passed as strings which leads dnspython to create Nameserver objects out of them using the port that is currently set in the Resolver instance. That creation of Nameserver objects is done right when the `nameservers` property is set. If a port is to be set by us, the `port` attribute of the Resolver needs to be set before the nameservers are passed to the Resolver so when the nameservers are passed, that new port is used to create the Nameserver objects. Therefore, the assignment of the `nameservers` property of the Resolver is moved after the argument processing so the `port` attribute is (if it's given in the lookup-call) definitely set before the `nameservers` property. --- changelogs/fragments/8966-dig-add-port-option.yml | 4 ++++ plugins/lookup/dig.py | 8 ++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelogs/fragments/8966-dig-add-port-option.yml diff --git a/changelogs/fragments/8966-dig-add-port-option.yml b/changelogs/fragments/8966-dig-add-port-option.yml new file mode 100644 index 0000000000..e92f355dd5 --- /dev/null +++ b/changelogs/fragments/8966-dig-add-port-option.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966). +... diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 7716331825..aae5ffe834 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -75,6 +75,11 @@ DOCUMENTATION = ''' default: false type: bool version_added: 7.5.0 + port: + description: Use port as target port when looking up DNS records. 
+ default: 53 + type: int + version_added: 9.5.0 notes: - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. @@ -336,6 +341,7 @@ class LookupModule(LookupBase): fail_on_error = self.get_option('fail_on_error') real_empty = self.get_option('real_empty') tcp = self.get_option('tcp') + port = self.get_option('port') try: rdclass = dns.rdataclass.from_text(self.get_option('class')) except Exception as e: @@ -396,6 +402,8 @@ class LookupModule(LookupBase): # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + if port: + myres.port = port if len(nameservers) > 0: myres.nameservers = nameservers From 464812a2c28dcae652f24345cf9d337c65d5f69c Mon Sep 17 00:00:00 2001 From: Boolman Date: Mon, 7 Oct 2024 22:29:13 +0200 Subject: [PATCH 282/482] keycloak_client add option to support client-x509 authentication (#8973) * keycloak_client: add client-x509 option to client_authenticator_type Signed-off-by: boolman * keycloak_client: add attributes for client-x509 Signed-off-by: boolman * keycloak_client update description Signed-off-by: boolman * keycloak_client add fragment Signed-off-by: boolman * remove trailing whitespace Signed-off-by: boolman * keycloak_client add example with x509 authentication Signed-off-by: boolman * Update plugins/modules/keycloak_client.py Co-authored-by: Felix Fontein * Update changelogs/fragments/8973-keycloak_client-add-x509-auth.yml Co-authored-by: Felix Fontein * keycloak_client added type on new suboptions Signed-off-by: boolman --------- Signed-off-by: boolman Co-authored-by: Felix Fontein --- .../8973-keycloak_client-add-x509-auth.yml | 2 + plugins/modules/keycloak_client.py | 42 +++++++++++++++---- 2 files changed, 37 insertions(+), 7 deletions(-) create mode 100644 
changelogs/fragments/8973-keycloak_client-add-x509-auth.yml diff --git a/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml b/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml new file mode 100644 index 0000000000..a7bc125f82 --- /dev/null +++ b/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973). diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index d7e4fb0b7e..d2800be292 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -108,13 +108,14 @@ options: client_authenticator_type: description: - - How do clients authenticate with the auth server? Either V(client-secret) or - V(client-jwt) can be chosen. When using V(client-secret), the module parameter - O(secret) can set it, while for V(client-jwt), you can use the keys C(use.jwks.url), + - How do clients authenticate with the auth server? Either V(client-secret), + V(client-jwt), or V(client-x509) can be chosen. When using V(client-secret), the module parameter + O(secret) can set it, for V(client-jwt), you can use the keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter - to configure its behavior. + to configure its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) + and C(x509.subjectdn) in the O(attributes) module parameter to configure which certificate(s) to accept. - This is 'clientAuthenticatorType' in the Keycloak REST API. - choices: ['client-secret', 'client-jwt'] + choices: ['client-secret', 'client-jwt', 'client-x509'] aliases: - clientAuthenticatorType type: str @@ -533,7 +534,6 @@ options: description: - SAML Redirect Binding URL for the client's assertion consumer service (login responses). 
- saml_force_name_id_format: description: - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. @@ -581,6 +581,18 @@ options: - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded. + x509.subjectdn: + description: + - For OpenID-Connect clients, subject which will be used to authenticate the client. + type: str + version_added: 9.5.0 + + x509.allow.regex.pattern.comparison: + description: + - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) as regular expression. + type: bool + version_added: 9.5.0 + extends_documentation_fragment: - community.general.keycloak - community.general.attributes @@ -624,6 +636,22 @@ EXAMPLES = ''' delegate_to: localhost +- name: Create or update a Keycloak client (minimal example), with x509 authentication + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: present + client_id: test + client_authenticator_type: client-x509 + attributes: + x509.subjectdn: "CN=client" + x509.allow.regex.pattern.comparison: false + + - name: Create or update a Keycloak client (with all the bells and whistles) community.general.keycloak_client: auth_client_id: admin-cli @@ -913,7 +941,7 @@ def main(): base_url=dict(type='str', aliases=['baseUrl']), surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), + client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt', 'client-x509'], aliases=['clientAuthenticatorType']), secret=dict(type='str', no_log=True), registration_access_token=dict(type='str', aliases=['registrationAccessToken'], 
no_log=True), default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), From b523d1b1c936cd6f7bacbd5ee6a78c5944cc6296 Mon Sep 17 00:00:00 2001 From: Pierre-yves Fontaniere Date: Mon, 7 Oct 2024 23:00:01 +0200 Subject: [PATCH 283/482] Remove 'CapacityBytes' from list of required parameters (#8956) * Remove 'CapacityBytes' from list of required parameters * Add CHANGELOG fragment * Fix sanity test failure whitespace before ']' * Update changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Add description for the volume_details key CapacityBytes * Update plugins/modules/redfish_config.py Co-authored-by: Mike Raineri * Adjust description. --------- Co-authored-by: Pierre-yves FONTANIERE Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Mike Raineri Co-authored-by: Felix Fontein --- ...remove-capacitybytes-from-the-required-parameters_list.yml | 2 ++ plugins/module_utils/redfish_utils.py | 4 ++-- plugins/modules/redfish_config.py | 3 +++ 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml diff --git a/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml b/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml new file mode 100644 index 0000000000..d6879ccb06 --- /dev/null +++ b/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_confg - remove ``CapacityBytes`` from required paramaters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 102d826e6d..30309ac0a9 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -3777,8 +3777,8 @@ class RedfishUtils(object): 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} # Validate input parameters - required_parameters = ['RAIDType', 'Drives', 'CapacityBytes'] - allowed_parameters = ['DisplayName', 'InitializeMethod', 'MediaSpanCount', + required_parameters = ['RAIDType', 'Drives'] + allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount', 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy'] for parameter in required_parameters: diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 25f3cffdb4..03146558f3 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -164,6 +164,9 @@ options: required: false description: - Setting dict of volume to be created. + - If C(CapacityBytes) key is not specified in this dictionary, the size of + the volume will be determined by the Redfish service. It is possible the + size will not be the maximum available size. type: dict default: {} version_added: '7.5.0' From 447d4b026768b08f6ab3315055f1802ea7552103 Mon Sep 17 00:00:00 2001 From: Pierre-yves Fontaniere Date: Mon, 7 Oct 2024 23:00:56 +0200 Subject: [PATCH 284/482] redfish_config new bool parameter to automatically delete 'None' type volumes. (#8990) * Add a new boolean parameter storage_none_volume_deletion to the volume creation command of redfish_config * Add description for storage_none_volume_deletion redfish_config parameter * Update plugins/module_utils/redfish_utils.py Co-authored-by: Mike Raineri * Update plugins/modules/redfish_config.py Co-authored-by: Mike Raineri * Add CHANGELOG fragment * Add punctuation. 
--------- Co-authored-by: Pierre-yves FONTANIERE Co-authored-by: Mike Raineri Co-authored-by: Felix Fontein --- changelogs/fragments/8990.yml | 3 +++ plugins/module_utils/redfish_utils.py | 29 ++++++++++++++------------- plugins/modules/redfish_config.py | 11 +++++++++- 3 files changed, 28 insertions(+), 15 deletions(-) create mode 100644 changelogs/fragments/8990.yml diff --git a/changelogs/fragments/8990.yml b/changelogs/fragments/8990.yml new file mode 100644 index 0000000000..716fd3c983 --- /dev/null +++ b/changelogs/fragments/8990.yml @@ -0,0 +1,3 @@ +minor_changes: + - redfish_config - add parameter ``storage_none_volume_deletion`` to + ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). \ No newline at end of file diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 30309ac0a9..28d6f2ef0c 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -3742,7 +3742,7 @@ class RedfishUtils(object): return {'ret': True, 'changed': True, 'msg': "The following volumes were deleted: %s" % str(volume_ids)} - def create_volume(self, volume_details, storage_subsystem_id): + def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False): # Find the Storage resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: @@ -3794,22 +3794,23 @@ class RedfishUtils(object): data = response['data'] # Deleting any volumes of RAIDType None present on the Storage Subsystem - response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) - if response['ret'] is False: - return response - volume_data = response['data'] + if storage_none_volume_deletion: + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return 
response + volume_data = response['data'] - if "Members" in volume_data: - for member in volume_data["Members"]: - response = self.get_request(self.root_uri + member['@odata.id']) - if response['ret'] is False: - return response - member_data = response['data'] - - if member_data["RAIDType"] == "None": - response = self.delete_request(self.root_uri + member['@odata.id']) + if "Members" in volume_data: + for member in volume_data["Members"]: + response = self.get_request(self.root_uri + member['@odata.id']) if response['ret'] is False: return response + member_data = response['data'] + + if member_data["RAIDType"] == "None": + response = self.delete_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response # Construct payload and issue POST command to create volume volume_details["Links"] = {} diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 03146558f3..5b9caecc64 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -145,6 +145,13 @@ options: type: str default: '' version_added: '7.3.0' + storage_none_volume_deletion: + required: false + description: + - Indicates if all non-RAID volumes are automatically deleted prior to creating the new volume. 
+ type: bool + default: false + version_added: '9.5.0' volume_ids: required: false description: @@ -418,6 +425,7 @@ def main(): hostinterface_id=dict(), sessions_config=dict(type='dict', default={}), storage_subsystem_id=dict(type='str', default=''), + storage_none_volume_deletion=dict(type='bool', default=False), volume_ids=dict(type='list', default=[], elements='str'), secure_boot_enable=dict(type='bool', default=True), volume_details=dict(type='dict', default={}), @@ -484,6 +492,7 @@ def main(): # Volume creation options volume_details = module.params['volume_details'] storage_subsystem_id = module.params['storage_subsystem_id'] + storage_none_volume_deletion = module.params['storage_none_volume_deletion'] # ciphers ciphers = module.params['ciphers'] @@ -527,7 +536,7 @@ def main(): elif command == "DeleteVolumes": result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids) elif command == "CreateVolume": - result = rf_utils.create_volume(volume_details, storage_subsystem_id) + result = rf_utils.create_volume(volume_details, storage_subsystem_id, storage_none_volume_deletion) elif category == "Manager": # execute only if we find a Manager service resource From ec6496024f45a2aea65fb6506db8809ab33fcfbb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 7 Oct 2024 23:37:44 +0200 Subject: [PATCH 285/482] Prepare 10.0.0 release (#8921) * Bump version to 10.0.0, remove deprecated modules and plugins. * Remove redhat module utils. * Drop support for ansible-core 2.13 and ansible-core 2.14. 
--- .github/BOTMETA.yml | 8 - .github/workflows/ansible-test.yml | 74 +- README.md | 2 +- changelogs/fragments/removals.yml | 10 + galaxy.yml | 2 +- meta/runtime.yml | 10 +- plugins/callback/hipchat.py | 240 ------ plugins/module_utils/redhat.py | 76 -- plugins/modules/consul_acl.py | 695 ------------------ plugins/modules/rhn_channel.py | 210 ------ plugins/modules/rhn_register.py | 465 ------------ tests/sanity/extra/botmeta.py | 1 - tests/unit/plugins/modules/rhn_conftest.py | 35 - .../unit/plugins/modules/test_rhn_channel.py | 147 ---- .../unit/plugins/modules/test_rhn_register.py | 293 -------- 15 files changed, 30 insertions(+), 2238 deletions(-) create mode 100644 changelogs/fragments/removals.yml delete mode 100644 plugins/callback/hipchat.py delete mode 100644 plugins/module_utils/redhat.py delete mode 100644 plugins/modules/consul_acl.py delete mode 100644 plugins/modules/rhn_channel.py delete mode 100644 plugins/modules/rhn_register.py delete mode 100644 tests/unit/plugins/modules/rhn_conftest.py delete mode 100644 tests/unit/plugins/modules/test_rhn_channel.py delete mode 100644 tests/unit/plugins/modules/test_rhn_register.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index be0bf6da30..bcf300025f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -61,7 +61,6 @@ files: $callbacks/elastic.py: keywords: apm observability maintainers: v1v - $callbacks/hipchat.py: {} $callbacks/jabber.py: {} $callbacks/log_plays.py: {} $callbacks/loganalytics.py: @@ -1161,12 +1160,6 @@ files: keywords: kvm libvirt proxmox qemu labels: rhevm virt maintainers: $team_virt TimothyVandenbrande - $modules/rhn_channel.py: - labels: rhn_channel - maintainers: vincentvdk alikins $team_rhn - $modules/rhn_register.py: - labels: rhn_register - maintainers: jlaska $team_rhn $modules/rhsm_release.py: maintainers: seandst $team_rhsm $modules/rhsm_repository.py: @@ -1554,7 +1547,6 @@ macros: team_oracle: manojmeda mross22 nalsaber team_purestorage: bannaych dnix101 
genegr lionmax opslounge raekins sdodsley sile16 team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt - team_rhn: FlossWare alikins barnabycourt vritant team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index 89a3006f56..ca06791a38 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -29,8 +29,6 @@ jobs: strategy: matrix: ansible: - - '2.13' - - '2.14' - '2.15' # Ansible-test on various stable branches does not yet work well with cgroups v2. # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 @@ -67,16 +65,8 @@ jobs: exclude: - ansible: '' include: - - ansible: '2.13' + - ansible: '2.15' python: '2.7' - - ansible: '2.13' - python: '3.8' - - ansible: '2.13' - python: '2.7' - - ansible: '2.13' - python: '3.8' - - ansible: '2.14' - python: '3.9' - ansible: '2.15' python: '3.5' - ansible: '2.15' @@ -121,57 +111,19 @@ jobs: exclude: - ansible: '' include: - # 2.13 - - ansible: '2.13' - docker: fedora35 - python: '' - target: azp/posix/1/ - - ansible: '2.13' - docker: fedora35 - python: '' - target: azp/posix/2/ - - ansible: '2.13' - docker: fedora35 - python: '' - target: azp/posix/3/ - - ansible: '2.13' - docker: opensuse15py2 - python: '' - target: azp/posix/1/ - - ansible: '2.13' - docker: opensuse15py2 - python: '' - target: azp/posix/2/ - - ansible: '2.13' - docker: opensuse15py2 - python: '' - target: azp/posix/3/ - - ansible: '2.13' - docker: alpine3 - python: '' - target: azp/posix/1/ - - ansible: '2.13' - docker: alpine3 - python: '' - target: azp/posix/2/ - - ansible: '2.13' - docker: alpine3 - python: '' - target: azp/posix/3/ - # 2.14 - - ansible: '2.14' - docker: alpine3 - python: '' - target: azp/posix/1/ - - ansible: '2.14' - docker: alpine3 - python: '' - target: azp/posix/2/ - - 
ansible: '2.14' - docker: alpine3 - python: '' - target: azp/posix/3/ # 2.15 + - ansible: '2.15' + docker: alpine3 + python: '' + target: azp/posix/1/ + - ansible: '2.15' + docker: alpine3 + python: '' + target: azp/posix/2/ + - ansible: '2.15' + docker: alpine3 + python: '' + target: azp/posix/3/ - ansible: '2.15' docker: fedora37 python: '' diff --git a/README.md b/README.md index 4edd58edb3..03dad49f39 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ For more information about communication, see the [Ansible communication guide]( ## Tested with Ansible -Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. +Tested with the current ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.15.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements diff --git a/changelogs/fragments/removals.yml b/changelogs/fragments/removals.yml new file mode 100644 index 0000000000..1a1f137194 --- /dev/null +++ b/changelogs/fragments/removals.yml @@ -0,0 +1,10 @@ +removed_features: + - "The hipchat callback plugin has been removed. The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020 (https://github.com/ansible-collections/community.general/pull/8921)." + - "The consul_acl module has been removed. Use community.general.consul_token and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921)." + - "The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921)." 
+ - "The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921)." + - "The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921)." +breaking_changes: + - The collection no longer supports ansible-core 2.13 and ansible-core 2.14. + While most (or even all) modules and plugins might still work with these versions, they are no longer tested in CI and breakages regarding them will not be fixed + (https://github.com/ansible-collections/community.general/pull/8921)." diff --git a/galaxy.yml b/galaxy.yml index 5112bdc64f..3af5356d06 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.5.0 +version: 10.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/meta/runtime.yml b/meta/runtime.yml index 5d4ed8cb89..f5adb64712 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -3,7 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -requires_ansible: '>=2.13.0' +requires_ansible: '>=2.15.0' action_groups: consul: - consul_agent_check @@ -44,7 +44,7 @@ plugin_routing: warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option. hipchat: - deprecation: + tombstone: removal_version: 10.0.0 warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. osx_say: @@ -72,7 +72,7 @@ plugin_routing: redirect: infoblox.nios_modules.nios_next_network modules: consul_acl: - deprecation: + tombstone: removal_version: 10.0.0 warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. hipchat: @@ -184,12 +184,12 @@ plugin_routing: removal_version: 9.0.0 warning_text: This module relied on the deprecated package pyrax. 
rhn_channel: - deprecation: + tombstone: removal_version: 10.0.0 warning_text: RHN is EOL, please contact the community.general maintainers if still using this; see the module documentation for more details. rhn_register: - deprecation: + tombstone: removal_version: 10.0.0 warning_text: RHN is EOL, please contact the community.general maintainers if still using this; see the module documentation for more details. diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py deleted file mode 100644 index bf0d425303..0000000000 --- a/plugins/callback/hipchat.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014, Matt Martz -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: hipchat - type: notification - requirements: - - whitelist in configuration. - - prettytable (python lib) - short_description: post task events to hipchat - description: - - This callback plugin sends status updates to a HipChat channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin. - deprecated: - removed_in: 10.0.0 - why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. - alternative: There is none. - options: - token: - description: HipChat API token for v1 or v2 API. - type: str - required: true - env: - - name: HIPCHAT_TOKEN - ini: - - section: callback_hipchat - key: token - api_version: - description: HipChat API version, v1 or v2. - type: str - choices: - - v1 - - v2 - required: false - default: v1 - env: - - name: HIPCHAT_API_VERSION - ini: - - section: callback_hipchat - key: api_version - room: - description: HipChat room to post in. 
- type: str - default: ansible - env: - - name: HIPCHAT_ROOM - ini: - - section: callback_hipchat - key: room - from: - description: Name to post as - type: str - default: ansible - env: - - name: HIPCHAT_FROM - ini: - - section: callback_hipchat - key: from - notify: - description: Add notify flag to important messages - type: bool - default: true - env: - - name: HIPCHAT_NOTIFY - ini: - - section: callback_hipchat - key: notify - -''' - -import os -import json - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - -from ansible.plugins.callback import CallbackBase -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - - -class CallbackModule(CallbackBase): - """This is an example ansible callback plugin that sends status - updates to a HipChat channel during playbook execution. - """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.hipchat' - CALLBACK_NEEDS_WHITELIST = True - - API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' - API_V2_URL = 'https://api.hipchat.com/v2/' - - def __init__(self): - - super(CallbackModule, self).__init__() - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not installed. ' - 'Disabling the HipChat callback plugin.') - self.printed_playbook = False - self.playbook_name = None - self.play = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.token = self.get_option('token') - self.api_version = self.get_option('api_version') - self.from_name = self.get_option('from') - self.allow_notify = self.get_option('notify') - self.room = self.get_option('room') - - if self.token is None: - self.disabled = True - self._display.warning('HipChat token could not be loaded. 
The HipChat ' - 'token can be provided using the `HIPCHAT_TOKEN` ' - 'environment variable.') - - # Pick the request handler. - if self.api_version == 'v2': - self.send_msg = self.send_msg_v2 - else: - self.send_msg = self.send_msg_v1 - - def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} - - body = {} - body['room_id'] = self.room - body['from'] = self.from_name[:15] # max length is 15 - body['message'] = msg - body['message_format'] = msg_format - body['color'] = color - body['notify'] = self.allow_notify and notify - - data = json.dumps(body) - url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) - try: - response = open_url(url, data=data, headers=headers, method='POST') - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - params = {} - params['room_id'] = self.room - params['from'] = self.from_name[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['notify'] = int(self.allow_notify and notify) - - url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) - try: - response = open_url(url, data=urlencode(params)) - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - - self.play = play - name = play.name - # This block sends information about a playbook when it starts - # The playbook object is not immediately available at - # playbook_on_start so we grab it via the play - # - # Displays info about playbook being started 
by a person on an - # inventory, as well as Tags, Skip Tags and Limits - if not self.printed_playbook: - self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) - host_list = self.play.playbook.inventory.host_list - inventory = os.path.basename(os.path.realpath(host_list)) - self.send_msg("%s: Playbook initiated by %s against %s" % - (self.playbook_name, - self.play.playbook.remote_user, - inventory), notify=True) - self.printed_playbook = True - subset = self.play.playbook.inventory._subset - skip_tags = self.play.playbook.skip_tags - self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % - (self.playbook_name, - ', '.join(self.play.playbook.only_tags), - ', '.join(skip_tags) if skip_tags else None, - ', '.join(subset) if subset else subset)) - - # This is where we actually say we are starting a play - self.send_msg("%s: Starting play: %s" % - (self.playbook_name, name)) - - def playbook_on_stats(self, stats): - """Display info about playbook statistics""" - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures']]) - - self.send_msg("%s: Playbook complete" % self.playbook_name, - notify=True) - - if failures or unreachable: - color = 'red' - self.send_msg("%s: Failures detected" % self.playbook_name, - color=color, notify=True) - else: - color = 'green' - - self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py deleted file mode 100644 index 321386a0a5..0000000000 --- a/plugins/module_utils/redhat.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent 
component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c), James Laska -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import shutil -import tempfile - -from ansible.module_utils.six.moves import configparser - - -class RegistrationBase(object): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 10.0.0. - There is no replacement for it; please contact the community.general - maintainers in case you are using it. - """ - - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if os.path.isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: - cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - - fd = open(tmpfile, 'w+') - cfg.write(fd) 
- fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py deleted file mode 100644 index 2d60af0625..0000000000 --- a/plugins/modules/consul_acl.py +++ /dev/null @@ -1,695 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_acl -short_description: Manipulate Consul ACL keys and rules -description: - - Allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/guides/acl.html. -author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -deprecated: - removed_in: 10.0.0 - why: The legacy ACL system was removed from Consul. - alternative: Use M(community.general.consul_token) and/or M(community.general.consul_policy) instead. 
-options: - mgmt_token: - description: - - a management token is required to manipulate the acl lists - required: true - type: str - state: - description: - - whether the ACL pair should be present or absent - required: false - choices: ['present', 'absent'] - default: present - type: str - token_type: - description: - - the type of token that should be created - choices: ['client', 'management'] - default: client - type: str - name: - description: - - the name that should be associated with the acl key, this is opaque - to Consul - required: false - type: str - token: - description: - - the token key identifying an ACL rule set. If generated by consul - this will be a UUID - required: false - type: str - rules: - type: list - elements: dict - description: - - rules that should be associated with a given token - required: false - host: - description: - - host of the consul agent defaults to localhost - required: false - default: localhost - type: str - port: - type: int - description: - - the port on which the consul agent is running - required: false - default: 8500 - scheme: - description: - - the protocol scheme on which the consul agent is running - required: false - default: http - type: str - validate_certs: - type: bool - description: - - whether to verify the tls certificate of the consul agent - required: false - default: true -requirements: - - python-consul - - pyhcl - - requests -''' - -EXAMPLES = """ -- name: Create an ACL with rules - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - rules: - - key: "foo" - policy: read - - key: "private/foo" - policy: deny - -- name: Create an ACL with a specific token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: my-token - rules: - - key: "foo" - policy: read - -- name: Update the rules associated to an ACL token - community.general.consul_acl: - host: consul1.example.com - 
mgmt_token: some_management_acl - name: Foo access - token: some_client_token - rules: - - event: "bbq" - policy: write - - key: "foo" - policy: read - - key: "private" - policy: deny - - keyring: write - - node: "hgs4" - policy: write - - operator: read - - query: "" - policy: write - - service: "consul" - policy: write - - session: "standup" - policy: write - -- name: Remove a token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e - state: absent -""" - -RETURN = """ -token: - description: the token associated to the ACL (the ACL's ID) - returned: success - type: str - sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da -rules: - description: the HCL JSON representation of the rules associated to the ACL, in the format described in the - Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). - returned: when O(state=present) - type: dict - sample: { - "key": { - "foo": { - "policy": "write" - }, - "bar": { - "policy": "deny" - } - } - } -operation: - description: the operation performed on the ACL - returned: changed - type: str - sample: update -""" - - -try: - import consul - python_consul_installed = True -except ImportError: - python_consul_installed = False - -try: - import hcl - pyhcl_installed = True -except ImportError: - pyhcl_installed = False - -try: - from requests.exceptions import ConnectionError - has_requests = True -except ImportError: - has_requests = False - -from collections import defaultdict -from ansible.module_utils.basic import to_text, AnsibleModule - - -RULE_SCOPES = [ - "agent", - "agent_prefix", - "event", - "event_prefix", - "key", - "key_prefix", - "keyring", - "node", - "node_prefix", - "operator", - "query", - "query_prefix", - "service", - "service_prefix", - "session", - "session_prefix", -] - -MANAGEMENT_PARAMETER_NAME = "mgmt_token" -HOST_PARAMETER_NAME = "host" -SCHEME_PARAMETER_NAME = "scheme" 
-VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" -NAME_PARAMETER_NAME = "name" -PORT_PARAMETER_NAME = "port" -RULES_PARAMETER_NAME = "rules" -STATE_PARAMETER_NAME = "state" -TOKEN_PARAMETER_NAME = "token" -TOKEN_TYPE_PARAMETER_NAME = "token_type" - -PRESENT_STATE_VALUE = "present" -ABSENT_STATE_VALUE = "absent" - -CLIENT_TOKEN_TYPE_VALUE = "client" -MANAGEMENT_TOKEN_TYPE_VALUE = "management" - -REMOVE_OPERATION = "remove" -UPDATE_OPERATION = "update" -CREATE_OPERATION = "create" - -_POLICY_JSON_PROPERTY = "policy" -_RULES_JSON_PROPERTY = "Rules" -_TOKEN_JSON_PROPERTY = "ID" -_TOKEN_TYPE_JSON_PROPERTY = "Type" -_NAME_JSON_PROPERTY = "Name" -_POLICY_YML_PROPERTY = "policy" -_POLICY_HCL_PROPERTY = "policy" - -_ARGUMENT_SPEC = { - MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), - HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), - NAME_PARAMETER_NAME: dict(), - PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(type='list', elements='dict'), - STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(no_log=False), - TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], - default=CLIENT_TOKEN_TYPE_VALUE) -} - - -def set_acl(consul_client, configuration): - """ - Sets an ACL based on the given configuration. 
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of setting the ACL - """ - acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = {acl.name: acl for acl in acls_as_json if acl.name is not None} - existing_acls_mapped_by_token = {acl.token: acl for acl in acls_as_json} - if None in existing_acls_mapped_by_token: - raise AssertionError("expecting ACL list to be associated to a token: %s" % - existing_acls_mapped_by_token[None]) - - if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: - # No token but name given so can get token from name - configuration.token = existing_acls_mapped_by_name[configuration.name].token - - if configuration.token and configuration.token in existing_acls_mapped_by_token: - return update_acl(consul_client, configuration) - else: - if configuration.token in existing_acls_mapped_by_token: - raise AssertionError() - if configuration.name in existing_acls_mapped_by_name: - raise AssertionError() - return create_acl(consul_client, configuration) - - -def update_acl(consul_client, configuration): - """ - Updates an ACL. 
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the update - """ - existing_acl = load_acl_with_token(consul_client, configuration.token) - changed = existing_acl.rules != configuration.rules - - if changed: - name = configuration.name if configuration.name is not None else existing_acl.name - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) - updated_token = consul_client.acl.update( - configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) - if updated_token != configuration.token: - raise AssertionError() - - return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) - - -def create_acl(consul_client, configuration): - """ - Creates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the creation - """ - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None - token = consul_client.acl.create( - name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) - rules = configuration.rules - return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) - - -def remove_acl(consul, configuration): - """ - Removes an ACL. - :param consul: the consul client - :param configuration: the run configuration - :return: the output of the removal - """ - token = configuration.token - changed = consul.acl.info(token) is not None - if changed: - consul.acl.destroy(token) - return Output(changed=changed, token=token, operation=REMOVE_OPERATION) - - -def load_acl_with_token(consul, token): - """ - Loads the ACL with the given token (token == rule ID). 
- :param consul: the consul client - :param token: the ACL "token"/ID (not name) - :return: the ACL associated to the given token - :exception ConsulACLTokenNotFoundException: raised if the given token does not exist - """ - acl_as_json = consul.acl.info(token) - if acl_as_json is None: - raise ConsulACLNotFoundException(token) - return decode_acl_as_json(acl_as_json) - - -def encode_rules_as_hcl_string(rules): - """ - Converts the given rules into the equivalent HCL (string) representation. - :param rules: the rules - :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal - note for justification) - """ - if len(rules) == 0: - # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty - # string if there is no rules... - return None - rules_as_hcl = "" - for rule in rules: - rules_as_hcl += encode_rule_as_hcl_string(rule) - return rules_as_hcl - - -def encode_rule_as_hcl_string(rule): - """ - Converts the given rule into the equivalent HCL (string) representation. - :param rule: the rule - :return: the equivalent HCL (string) representation of the rule - """ - if rule.pattern is not None: - return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) - else: - return '%s = "%s"\n' % (rule.scope, rule.policy) - - -def decode_rules_as_hcl_string(rules_as_hcl): - """ - Converts the given HCL (string) representation of rules into a list of rule domain models. - :param rules_as_hcl: the HCL (string) representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules_as_hcl = to_text(rules_as_hcl) - rules_as_json = hcl.loads(rules_as_hcl) - return decode_rules_as_json(rules_as_json) - - -def decode_rules_as_json(rules_as_json): - """ - Converts the given JSON representation of rules into a list of rule domain models. 
- :param rules_as_json: the JSON representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - for scope in rules_as_json: - if not isinstance(rules_as_json[scope], dict): - rules.add(Rule(scope, rules_as_json[scope])) - else: - for pattern, policy in rules_as_json[scope].items(): - rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) - return rules - - -def encode_rules_as_json(rules): - """ - Converts the given rules into the equivalent JSON representation according to the documentation: - https://www.consul.io/docs/guides/acl.html#rule-specification. - :param rules: the rules - :return: JSON representation of the given rules - """ - rules_as_json = defaultdict(dict) - for rule in rules: - if rule.pattern is not None: - if rule.pattern in rules_as_json[rule.scope]: - raise AssertionError() - rules_as_json[rule.scope][rule.pattern] = { - _POLICY_JSON_PROPERTY: rule.policy - } - else: - if rule.scope in rules_as_json: - raise AssertionError() - rules_as_json[rule.scope] = rule.policy - return rules_as_json - - -def decode_rules_as_yml(rules_as_yml): - """ - Converts the given YAML representation of rules into a list of rule domain models. - :param rules_as_yml: the YAML representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - if rules_as_yml: - for rule_as_yml in rules_as_yml: - rule_added = False - for scope in RULE_SCOPES: - if scope in rule_as_yml: - if rule_as_yml[scope] is None: - raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) - policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ - else rule_as_yml[scope] - pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None - rules.add(Rule(scope, policy, pattern)) - rule_added = True - break - if not rule_added: - raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) - return rules - - -def decode_acl_as_json(acl_as_json): - """ - Converts the given JSON representation of an ACL into the equivalent domain model. - :param acl_as_json: the JSON representation of an ACL - :return: the equivalent domain model to the given ACL - """ - rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] - rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ - else RuleCollection() - return ACL( - rules=rules, - token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], - token=acl_as_json[_TOKEN_JSON_PROPERTY], - name=acl_as_json[_NAME_JSON_PROPERTY] - ) - - -def decode_acls_as_json(acls_as_json): - """ - Converts the given JSON representation of ACLs into a list of ACL domain models. - :param acls_as_json: the JSON representation of a collection of ACLs - :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) - """ - return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] - - -class ConsulACLNotFoundException(Exception): - """ - Exception raised if an ACL with is not found. - """ - - -class Configuration: - """ - Configuration for this module. - """ - - def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, - rules=None, state=None, token=None, token_type=None): - self.management_token = management_token # type: str - self.host = host # type: str - self.scheme = scheme # type: str - self.validate_certs = validate_certs # type: bool - self.name = name # type: str - self.port = port # type: int - self.rules = rules # type: RuleCollection - self.state = state # type: str - self.token = token # type: str - self.token_type = token_type # type: str - - -class Output: - """ - Output of an action of this module. 
- """ - - def __init__(self, changed=None, token=None, rules=None, operation=None): - self.changed = changed # type: bool - self.token = token # type: str - self.rules = rules # type: RuleCollection - self.operation = operation # type: str - - -class ACL: - """ - Consul ACL. See: https://www.consul.io/docs/guides/acl.html. - """ - - def __init__(self, rules, token_type, token, name): - self.rules = rules - self.token_type = token_type - self.token = token - self.name = name - - def __eq__(self, other): - return other \ - and isinstance(other, self.__class__) \ - and self.rules == other.rules \ - and self.token_type == other.token_type \ - and self.token == other.token \ - and self.name == other.name - - def __hash__(self): - return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) - - -class Rule: - """ - ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. - """ - - def __init__(self, scope, policy, pattern=None): - self.scope = scope - self.policy = policy - self.pattern = pattern - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and self.scope == other.scope \ - and self.policy == other.policy \ - and self.pattern == other.pattern - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) - - def __str__(self): - return encode_rule_as_hcl_string(self) - - -class RuleCollection: - """ - Collection of ACL rules, which are part of a Consul ACL. 
- """ - - def __init__(self): - self._rules = {} - for scope in RULE_SCOPES: - self._rules[scope] = {} - - def __iter__(self): - all_rules = [] - for scope, pattern_keyed_rules in self._rules.items(): - for pattern, rule in pattern_keyed_rules.items(): - all_rules.append(rule) - return iter(all_rules) - - def __len__(self): - count = 0 - for scope in RULE_SCOPES: - count += len(self._rules[scope]) - return count - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and set(self) == set(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return encode_rules_as_hcl_string(self) - - def add(self, rule): - """ - Adds the given rule to this collection. - :param rule: model of a rule - :raises ValueError: raised if there already exists a rule for a given scope and pattern - """ - if rule.pattern in self._rules[rule.scope]: - patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" - raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) - self._rules[rule.scope][rule.pattern] = rule - - -def get_consul_client(configuration): - """ - Gets a Consul client for the given configuration. - - Does not check if the Consul client can connect. - :param configuration: the run configuration - :return: Consul client - """ - token = configuration.management_token - if token is None: - token = configuration.token - if token is None: - raise AssertionError("Expecting the management token to always be set") - return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, - verify=configuration.validate_certs, token=token) - - -def check_dependencies(): - """ - Checks that the required dependencies have been imported. - :exception ImportError: if it is detected that any of the required dependencies have not been imported - """ - if not python_consul_installed: - raise ImportError("python-consul required for this module. 
" - "See: https://python-consul.readthedocs.io/en/latest/#installation") - - if not pyhcl_installed: - raise ImportError("pyhcl required for this module. " - "See: https://pypi.org/project/pyhcl/") - - if not has_requests: - raise ImportError("requests required for this module. See https://pypi.org/project/requests/") - - -def main(): - """ - Main method. - """ - module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) - - try: - check_dependencies() - except ImportError as e: - module.fail_json(msg=str(e)) - - configuration = Configuration( - management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), - host=module.params.get(HOST_PARAMETER_NAME), - scheme=module.params.get(SCHEME_PARAMETER_NAME), - validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), - name=module.params.get(NAME_PARAMETER_NAME), - port=module.params.get(PORT_PARAMETER_NAME), - rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), - state=module.params.get(STATE_PARAMETER_NAME), - token=module.params.get(TOKEN_PARAMETER_NAME), - token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) - ) - consul_client = get_consul_client(configuration) - - try: - if configuration.state == PRESENT_STATE_VALUE: - output = set_acl(consul_client, configuration) - else: - output = remove_acl(consul_client, configuration) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - configuration.host, configuration.port, str(e))) - raise - - return_values = dict(changed=output.changed, token=output.token, operation=output.operation) - if output.rules is not None: - return_values["rules"] = encode_rules_as_json(output.rules) - module.exit_json(**return_values) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/rhn_channel.py b/plugins/modules/rhn_channel.py deleted file mode 100644 index b69bb0c686..0000000000 --- a/plugins/modules/rhn_channel.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -# -*- 
coding: utf-8 -*- - -# Copyright (c) Vincent Van de Kussen -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: rhn_channel -short_description: Adds or removes Red Hat software channels -description: - - Adds or removes Red Hat software channels. -author: - - Vincent Van der Kussen (@vincentvdk) -notes: - - This module fetches the system id from RHN. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - description: - - Name of the software channel. - required: true - type: str - sysname: - description: - - Name of the system as it is known in RHN/Satellite. - required: true - type: str - state: - description: - - Whether the channel should be present or not, taking action if the state is different from what is stated. - default: present - choices: [ present, absent ] - type: str - url: - description: - - The full URL to the RHN/Satellite API. - required: true - type: str - user: - description: - - RHN/Satellite login. - required: true - type: str - password: - description: - - RHN/Satellite password. - aliases: [pwd] - required: true - type: str - validate_certs: - description: - - If V(false), SSL certificates will not be validated. - - This should only set to V(false) when used on self controlled sites - using self-signed certificates, and you are absolutely sure that nobody - can modify traffic between the module and the site. 
- type: bool - default: true - version_added: '0.2.0' -deprecated: - removed_in: 10.0.0 - why: | - RHN hosted at redhat.com was discontinued years ago, and Spacewalk 5 - (which uses RHN) is EOL since 2020, May 31st; while this module could - work on Uyuni / SUSE Manager (fork of Spacewalk 5), we have not heard - about anyone using it in those setups. - alternative: | - Contact the community.general maintainers to report the usage of this - module, and potentially step up to maintain it. -''' - -EXAMPLES = ''' -- name: Add a Red Hat software channel - community.general.rhn_channel: - name: rhel-x86_64-server-v2vwin-6 - sysname: server01 - url: https://rhn.redhat.com/rpc/api - user: rhnuser - password: guessme - delegate_to: localhost -''' - -import ssl -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -def get_systemid(client, session, sysname): - systems = client.system.listUserSystems(session) - for system in systems: - if system.get('name') == sysname: - idres = system.get('id') - idd = int(idres) - return idd - - -def subscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.append(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def unsubscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.remove(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def base_channels(client, session, sys_id): - basechan = client.channel.software.listSystemChannels(session, sys_id) - try: - chans = [item['label'] for item in basechan] - except KeyError: - chans = [item['channel_label'] for item in basechan] - return chans - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', 
choices=['present', 'absent']), - name=dict(type='str', required=True), - sysname=dict(type='str', required=True), - url=dict(type='str', required=True), - user=dict(type='str', required=True), - password=dict(type='str', required=True, aliases=['pwd'], no_log=True), - validate_certs=dict(type='bool', default=True), - ) - ) - - state = module.params['state'] - channelname = module.params['name'] - systname = module.params['sysname'] - saturl = module.params['url'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - - ssl_context = None - if not validate_certs: - try: # Python 2.7.9 and newer - ssl_context = ssl.create_unverified_context() - except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default - ssl_context = ssl._create_unverified_context() - else: # Python 2.7.8 and older - ssl._create_default_https_context = ssl._create_unverified_https_context - - # initialize connection - if ssl_context: - client = xmlrpc_client.ServerProxy(saturl, context=ssl_context) - else: - client = xmlrpc_client.Server(saturl) - - try: - session = client.auth.login(user, password) - except Exception as e: - module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e)) - - if not session: - module.fail_json(msg="Failed to establish session with Satellite server.") - - # get systemid - try: - sys_id = get_systemid(client, session, systname) - except Exception as e: - module.fail_json(msg="Unable to get system id: %s " % to_text(e)) - - if not sys_id: - module.fail_json(msg="Failed to get system id.") - - # get channels for system - try: - chans = base_channels(client, session, sys_id) - except Exception as e: - module.fail_json(msg="Unable to get channel information: %s " % to_text(e)) - - try: - if state == 'present': - if channelname in chans: - module.exit_json(changed=False, msg="Channel %s already exists" % channelname) - else: - 
subscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s added" % channelname) - - if state == 'absent': - if channelname not in chans: - module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname) - else: - unsubscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s removed" % channelname) - except Exception as e: - module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e))) - finally: - client.auth.logout(session) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/rhn_register.py b/plugins/modules/rhn_register.py deleted file mode 100644 index cd1b708e48..0000000000 --- a/plugins/modules/rhn_register.py +++ /dev/null @@ -1,465 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) James Laska -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: rhn_register -short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command -description: - - Manage registration to the Red Hat Network. -author: - - James Laska (@jlaska) -notes: - - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead. - - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey. -requirements: - - rhnreg_ks - - either libxml2 or lxml -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - state: - description: - - Whether to register (V(present)), or unregister (V(absent)) a system. 
- type: str - choices: [ absent, present ] - default: present - username: - description: - - Red Hat Network username. - type: str - password: - description: - - Red Hat Network password. - type: str - server_url: - description: - - Specify an alternative Red Hat Network server URL. - - The default is the current value of C(serverURL) from C(/etc/sysconfig/rhn/up2date). - type: str - activationkey: - description: - - Supply an activation key for use with registration. - type: str - profilename: - description: - - Supply an profilename for use with registration. - type: str - force: - description: - - Force registration, even if system is already registered. - type: bool - default: false - version_added: 2.0.0 - ca_cert: - description: - - Supply a custom ssl CA certificate file for use with registration. - type: path - aliases: [ sslcacert ] - systemorgid: - description: - - Supply an organizational id for use with registration. - type: str - channels: - description: - - Optionally specify a list of channels to subscribe to upon successful registration. - type: list - elements: str - default: [] - enable_eus: - description: - - If V(false), extended update support will be requested. - type: bool - default: false - nopackages: - description: - - If V(true), the registered node will not upload its installed packages information to Satellite server. - type: bool - default: false -deprecated: - removed_in: 10.0.0 - why: | - RHN hosted at redhat.com was discontinued years ago, and Spacewalk 5 - (which uses RHN) is EOL since 2020, May 31st; while this module could - work on Uyuni / SUSE Manager (fork of Spacewalk 5), we have not heard - about anyone using it in those setups. - alternative: | - Contact the community.general maintainers to report the usage of this - module, and potentially step up to maintain it. 
-''' - -EXAMPLES = r''' -- name: Unregister system from RHN - community.general.rhn_register: - state: absent - username: joe_user - password: somepass - -- name: Register as user with password and auto-subscribe to available content - community.general.rhn_register: - state: present - username: joe_user - password: somepass - -- name: Register with activationkey and enable extended update support - community.general.rhn_register: - state: present - activationkey: 1-222333444 - enable_eus: true - -- name: Register with activationkey and set a profilename which may differ from the hostname - community.general.rhn_register: - state: present - activationkey: 1-222333444 - profilename: host.example.com.custom - -- name: Register as user with password against a satellite server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - -- name: Register as user with password and enable channels - community.general.rhn_register: - state: present - username: joe_user - password: somepass - channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 - -- name: Force-register as user with password to ensure registration is current on server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - force: true -''' - -RETURN = r''' -# Default return values -''' - -import os -import sys - -# Attempt to import rhn client tools -sys.path.insert(0, '/usr/share/rhn') -try: - import up2date_client - import up2date_client.config - HAS_UP2DATE_CLIENT = True -except ImportError: - HAS_UP2DATE_CLIENT = False - -# INSERT REDHAT SNIPPETS -from ansible_collections.community.general.plugins.module_utils import redhat -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import urllib, xmlrpc_client - - -class Rhn(redhat.RegistrationBase): - - def __init__(self, module=None, 
username=None, password=None): - redhat.RegistrationBase.__init__(self, module, username, password) - self.config = self.load_config() - self.server = None - self.session = None - - def logout(self): - if self.session is not None: - self.server.auth.logout(self.session) - - def load_config(self): - ''' - Read configuration from /etc/sysconfig/rhn/up2date - ''' - if not HAS_UP2DATE_CLIENT: - return None - - config = up2date_client.config.initUp2dateConfig() - - return config - - @property - def server_url(self): - return self.config['serverURL'] - - @property - def hostname(self): - ''' - Return the non-xmlrpc RHN hostname. This is a convenience method - used for displaying a more readable RHN hostname. - - Returns: str - ''' - url = urllib.parse.urlparse(self.server_url) - return url[1].replace('xmlrpc.', '') - - @property - def systemid(self): - systemid = None - xpath_str = "//member[name='system_id']/value/string" - - if os.path.isfile(self.config['systemIdPath']): - fd = open(self.config['systemIdPath'], 'r') - xml_data = fd.read() - fd.close() - - # Ugh, xml parsing time ... - # First, try parsing with libxml2 ... - if systemid is None: - try: - import libxml2 - doc = libxml2.parseDoc(xml_data) - ctxt = doc.xpathNewContext() - systemid = ctxt.xpathEval(xpath_str)[0].content - doc.freeDoc() - ctxt.xpathFreeContext() - except ImportError: - pass - - # m-kay, let's try with lxml now ... - if systemid is None: - try: - from lxml import etree - root = etree.fromstring(xml_data) - systemid = root.xpath(xpath_str)[0].text - except ImportError: - raise Exception('"libxml2" or "lxml" is required for this module.') - - # Strip the 'ID-' prefix - if systemid is not None and systemid.startswith('ID-'): - systemid = systemid[3:] - - return int(systemid) - - @property - def is_registered(self): - ''' - Determine whether the current system is registered. 
- - Returns: True|False - ''' - return os.path.isfile(self.config['systemIdPath']) - - def configure_server_url(self, server_url): - ''' - Configure server_url for registration - ''' - - self.config.set('serverURL', server_url) - self.config.save() - - def enable(self): - ''' - Prepare the system for RHN registration. This includes ... - * enabling the rhnplugin yum plugin - * disabling the subscription-manager yum plugin - ''' - redhat.RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', True) - self.update_plugin_conf('subscription-manager', False) - - def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False): - ''' - Register system to RHN. If enable_eus=True, extended update - support will be requested. - ''' - register_cmd = ['/usr/sbin/rhnreg_ks', '--force'] - if self.username: - register_cmd.extend(['--username', self.username, '--password', self.password]) - if self.server_url: - register_cmd.extend(['--serverUrl', self.server_url]) - if enable_eus: - register_cmd.append('--use-eus-channel') - if nopackages: - register_cmd.append('--nopackages') - if activationkey is not None: - register_cmd.extend(['--activationkey', activationkey]) - if profilename is not None: - register_cmd.extend(['--profilename', profilename]) - if sslcacert is not None: - register_cmd.extend(['--sslCACert', sslcacert]) - if systemorgid is not None: - register_cmd.extend(['--systemorgid', systemorgid]) - rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True) - - def api(self, method, *args): - ''' - Convenience RPC wrapper - ''' - if self.server is None: - if self.hostname != 'rhn.redhat.com': - url = "https://%s/rpc/api" % self.hostname - else: - url = "https://xmlrpc.%s/rpc/api" % self.hostname - self.server = xmlrpc_client.ServerProxy(url) - self.session = self.server.auth.login(self.username, self.password) - - func = getattr(self.server, method) - return func(self.session, 
*args) - - def unregister(self): - ''' - Unregister a previously registered system - ''' - - # Initiate RPC connection - self.api('system.deleteSystems', [self.systemid]) - - # Remove systemid file - os.unlink(self.config['systemIdPath']) - - def subscribe(self, channels): - if not channels: - return - - if self._is_hosted(): - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - new_channels = [item['channel_label'] for item in current_channels] - new_channels.extend(channels) - return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) - - else: - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - current_channels = [item['label'] for item in current_channels] - new_base = None - new_childs = [] - for ch in channels: - if ch in current_channels: - continue - if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': - new_base = ch - else: - if ch not in new_childs: - new_childs.append(ch) - out_base = 0 - out_childs = 0 - - if new_base: - out_base = self.api('system.setBaseChannel', self.systemid, new_base) - - if new_childs: - out_childs = self.api('system.setChildChannels', self.systemid, new_childs) - - return out_base and out_childs - - def _is_hosted(self): - ''' - Return True if we are running against Hosted (rhn.redhat.com) or - False otherwise (when running against Satellite or Spacewalk) - ''' - return 'rhn.redhat.com' in self.hostname - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - username=dict(type='str'), - password=dict(type='str', no_log=True), - server_url=dict(type='str'), - activationkey=dict(type='str', no_log=True), - profilename=dict(type='str'), - ca_cert=dict(type='path', aliases=['sslcacert']), - systemorgid=dict(type='str'), - enable_eus=dict(type='bool', default=False), - force=dict(type='bool', default=False), - 
nopackages=dict(type='bool', default=False), - channels=dict(type='list', elements='str', default=[]), - ), - # username/password is required for state=absent, or if channels is not empty - # (basically anything that uses self.api requires username/password) but it doesn't - # look like we can express that with required_if/required_together/mutually_exclusive - - # only username+password can be used for unregister - required_if=[['state', 'absent', ['username', 'password']]], - ) - - if not HAS_UP2DATE_CLIENT: - module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?") - - server_url = module.params['server_url'] - username = module.params['username'] - password = module.params['password'] - - state = module.params['state'] - force = module.params['force'] - activationkey = module.params['activationkey'] - profilename = module.params['profilename'] - sslcacert = module.params['ca_cert'] - systemorgid = module.params['systemorgid'] - channels = module.params['channels'] - enable_eus = module.params['enable_eus'] - nopackages = module.params['nopackages'] - - rhn = Rhn(module=module, username=username, password=password) - - # use the provided server url and persist it to the rhn config. - if server_url: - rhn.configure_server_url(server_url) - - if not rhn.server_url: - module.fail_json( - msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)" - ) - - # Ensure system is registered - if state == 'present': - - # Check for missing parameters ... 
- if not (activationkey or rhn.username or rhn.password): - module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, - rhn.password)) - if not activationkey and not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") - - # Register system - if rhn.is_registered and not force: - module.exit_json(changed=False, msg="System already registered.") - - try: - rhn.enable() - rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages) - rhn.subscribe(channels) - except Exception as exc: - module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc)) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) - - # Ensure system is *not* registered - if state == 'absent': - if not rhn.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - - if not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password") - - try: - rhn.unregister() - except Exception as exc: - module.fail_json(msg="Failed to unregister: %s" % exc) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully unregistered from %s." 
% rhn.hostname) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index d7828ebabb..07ca189e81 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -27,7 +27,6 @@ IGNORE_NO_MAINTAINERS = [ 'plugins/callback/cgroup_memory_recap.py', 'plugins/callback/context_demo.py', 'plugins/callback/counter_enabled.py', - 'plugins/callback/hipchat.py', 'plugins/callback/jabber.py', 'plugins/callback/log_plays.py', 'plugins/callback/logdna.py', diff --git a/tests/unit/plugins/modules/rhn_conftest.py b/tests/unit/plugins/modules/rhn_conftest.py deleted file mode 100644 index acc0e2f221..0000000000 --- a/tests/unit/plugins/modules/rhn_conftest.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils.six.moves import xmlrpc_client - -import pytest - - -def get_method_name(request_body): - return xmlrpc_client.loads(request_body)[1] - - -@pytest.fixture -def mock_request(request, mocker): - responses = request.getfixturevalue('testcase')['calls'] - module_name = request.module.TESTED_MODULE - - def transport_request(host, handler, request_body, verbose=0): - """Fake request""" - method_name = get_method_name(request_body) - excepted_name, response = responses.pop(0) - if method_name == excepted_name: - if isinstance(response, Exception): - raise response - else: - return response - else: - raise Exception('Expected call: %r, called with: %r' % (excepted_name, method_name)) - - target = '{0}.xmlrpc_client.Transport.request'.format(module_name) - mocker.patch(target, side_effect=transport_request) diff --git a/tests/unit/plugins/modules/test_rhn_channel.py b/tests/unit/plugins/modules/test_rhn_channel.py 
deleted file mode 100644 index fd3bdc5fe0..0000000000 --- a/tests/unit/plugins/modules/test_rhn_channel.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Pierre-Louis Bonicoli -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json - -from ansible_collections.community.general.plugins.modules import rhn_channel - -from .rhn_conftest import mock_request # noqa: F401, pylint: disable=unused-import - -import pytest - - -pytestmark = pytest.mark.usefixtures('patch_ansible_module') - - -@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module']) -def test_without_required_parameters(capfd): - with pytest.raises(SystemExit): - rhn_channel.main() - out, err = capfd.readouterr() - results = json.loads(out) - assert results['failed'] - assert 'missing required arguments' in results['msg'] - - -TESTED_MODULE = rhn_channel.__name__ -TEST_CASES = [ - [ - # add channel already added, check that result isn't changed - { - 'name': 'rhel-x86_64-server-6', - 'sysname': 'server01', - 'url': 'https://rhn.redhat.com/rpc/api', - 'user': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.listUserSystems', - [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]), - ('channel.software.listSystemChannels', - [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 
6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]), - ('auth.logout', [1]), - ], - 'changed': False, - 'msg': 'Channel rhel-x86_64-server-6 already exists', - } - ], - [ - # add channel, check that result is changed - { - 'name': 'rhel-x86_64-server-6-debuginfo', - 'sysname': 'server01', - 'url': 'https://rhn.redhat.com/rpc/api', - 'user': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.listUserSystems', - [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]), - ('channel.software.listSystemChannels', - [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]), - ('channel.software.listSystemChannels', - [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]), - ('system.setChildChannels', [1]), - ('auth.logout', [1]), - ], - 'changed': True, - 'msg': 'Channel rhel-x86_64-server-6-debuginfo added', - } - ], - [ - # remove inexistent channel, check that result isn't changed - { - 'name': 'rhel-x86_64-server-6-debuginfo', - 'state': 'absent', - 'sysname': 'server01', - 'url': 'https://rhn.redhat.com/rpc/api', - 'user': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.listUserSystems', - [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]), - ('channel.software.listSystemChannels', - [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 
6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]), - ('auth.logout', [1]), - ], - 'changed': False, - 'msg': 'Not subscribed to channel rhel-x86_64-server-6-debuginfo.', - } - ], - [ - # remove channel, check that result is changed - { - 'name': 'rhel-x86_64-server-6-debuginfo', - 'state': 'absent', - 'sysname': 'server01', - 'url': 'https://rhn.redhat.com/rpc/api', - 'user': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.listUserSystems', - [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]), - ('channel.software.listSystemChannels', [[ - {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'}, - {'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'} - ]]), - ('channel.software.listSystemChannels', [[ - {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'}, - {'channel_name': 'Red Hat Enterprise Linux Server (v. 
6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'} - ]]), - ('system.setChildChannels', [1]), - ('auth.logout', [1]), - ], - 'changed': True, - 'msg': 'Channel rhel-x86_64-server-6-debuginfo removed' - } - ] -] - - -@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module']) -def test_rhn_channel(capfd, mocker, testcase, mock_request): - """Check 'msg' and 'changed' results""" - - with pytest.raises(SystemExit): - rhn_channel.main() - - out, err = capfd.readouterr() - results = json.loads(out) - assert results['changed'] == testcase['changed'] - assert results['msg'] == testcase['msg'] - assert not testcase['calls'] # all calls should have been consumed diff --git a/tests/unit/plugins/modules/test_rhn_register.py b/tests/unit/plugins/modules/test_rhn_register.py deleted file mode 100644 index 1394c07b65..0000000000 --- a/tests/unit/plugins/modules/test_rhn_register.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os - -from ansible_collections.community.general.tests.unit.compat.mock import mock_open -from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_native -import ansible.module_utils.six -from ansible.module_utils.six.moves import xmlrpc_client -from ansible_collections.community.general.plugins.modules import rhn_register - -from .rhn_conftest import mock_request # noqa: F401, pylint: disable=unused-import - -import pytest - - -SYSTEMID = """ - - - - -system_id -ID-123456789 - - - - -""" - - -def skipWhenAllModulesMissing(modules): - """Skip the decorated test unless one of modules is available.""" - for module in modules: - try: - __import__(module) - return False - 
except ImportError: - continue - - return True - - -orig_import = __import__ - - -@pytest.fixture -def import_libxml(mocker): - def mock_import(name, *args, **kwargs): - if name in ['libxml2', 'libxml']: - raise ImportError() - else: - return orig_import(name, *args, **kwargs) - - if ansible.module_utils.six.PY3: - mocker.patch('builtins.__import__', side_effect=mock_import) - else: - mocker.patch('__builtin__.__import__', side_effect=mock_import) - - -@pytest.fixture -def patch_rhn(mocker): - load_config_return = { - 'serverURL': 'https://xmlrpc.rhn.redhat.com/XMLRPC', - 'systemIdPath': '/etc/sysconfig/rhn/systemid' - } - - mocker.patch.object(rhn_register.Rhn, 'load_config', return_value=load_config_return) - mocker.patch.object(rhn_register, 'HAS_UP2DATE_CLIENT', mocker.PropertyMock(return_value=True)) - - -@pytest.mark.skipif(skipWhenAllModulesMissing(['libxml2', 'libxml']), reason='none are available: libxml2, libxml') -def test_systemid_with_requirements(capfd, mocker, patch_rhn): - """Check 'msg' and 'changed' results""" - - mocker.patch.object(rhn_register.Rhn, 'enable') - mock_isfile = mocker.patch('os.path.isfile', return_value=True) - mocker.patch('ansible_collections.community.general.plugins.modules.rhn_register.open', mock_open(read_data=SYSTEMID), create=True) - rhn = rhn_register.Rhn() - assert '123456789' == to_native(rhn.systemid) - - -@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') -def test_systemid_requirements_missing(capfd, mocker, patch_rhn, import_libxml): - """Check that missing dependencies are detected""" - - mocker.patch('os.path.isfile', return_value=True) - mocker.patch('ansible_collections.community.general.plugins.modules.rhn_register.open', mock_open(read_data=SYSTEMID), create=True) - - with pytest.raises(SystemExit): - rhn_register.main() - - out, err = capfd.readouterr() - results = json.loads(out) - assert results['failed'] - assert 
'Missing arguments' in results['msg'] - - -@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') -def test_without_required_parameters(capfd, patch_rhn): - """Failure must occurs when all parameters are missing""" - - with pytest.raises(SystemExit): - rhn_register.main() - out, err = capfd.readouterr() - results = json.loads(out) - assert results['failed'] - assert 'Missing arguments' in results['msg'] - - -TESTED_MODULE = rhn_register.__name__ -TEST_CASES = [ - [ - # Registering an unregistered host with channels - { - 'channels': 'rhel-x86_64-server-6', - 'username': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('channel.software.listSystemChannels', - [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]), - ('channel.software.setSystemChannels', [1]), - ('auth.logout', [1]), - ], - 'is_registered': False, - 'is_registered.call_count': 1, - 'enable.call_count': 1, - 'systemid.call_count': 2, - 'changed': True, - 'msg': "System successfully registered to 'rhn.redhat.com'.", - 'run_command.call_count': 1, - 'run_command.call_args': '/usr/sbin/rhnreg_ks', - 'request_called': True, - 'unlink.call_count': 0, - } - ], - [ - # Registering an unregistered host without channels - { - 'activationkey': 'key', - 'username': 'user', - 'password': 'pass', - }, - { - 'calls': [ - ], - 'is_registered': False, - 'is_registered.call_count': 1, - 'enable.call_count': 1, - 'systemid.call_count': 0, - 'changed': True, - 'msg': "System successfully registered to 'rhn.redhat.com'.", - 'run_command.call_count': 1, - 'run_command.call_args': '/usr/sbin/rhnreg_ks', - 'request_called': False, - 'unlink.call_count': 0, - } - ], - [ - # Register an host already registered, check that result is unchanged - { - 'activationkey': 'key', - 'username': 'user', - 'password': 'pass', - }, - { - 'calls': [ - 
], - 'is_registered': True, - 'is_registered.call_count': 1, - 'enable.call_count': 0, - 'systemid.call_count': 0, - 'changed': False, - 'msg': 'System already registered.', - 'run_command.call_count': 0, - 'request_called': False, - 'unlink.call_count': 0, - }, - ], - [ - # Unregister an host, check that result is changed - { - 'activationkey': 'key', - 'username': 'user', - 'password': 'pass', - 'state': 'absent', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.deleteSystems', [1]), - ('auth.logout', [1]), - ], - 'is_registered': True, - 'is_registered.call_count': 1, - 'enable.call_count': 0, - 'systemid.call_count': 1, - 'changed': True, - 'msg': 'System successfully unregistered from rhn.redhat.com.', - 'run_command.call_count': 0, - 'request_called': True, - 'unlink.call_count': 1, - } - ], - [ - # Unregister a unregistered host (systemid missing) locally, check that result is unchanged - { - 'activationkey': 'key', - 'username': 'user', - 'password': 'pass', - 'state': 'absent', - }, - { - 'calls': [], - 'is_registered': False, - 'is_registered.call_count': 1, - 'enable.call_count': 0, - 'systemid.call_count': 0, - 'changed': False, - 'msg': 'System already unregistered.', - 'run_command.call_count': 0, - 'request_called': False, - 'unlink.call_count': 0, - } - - ], - [ - # Unregister an unknown host (an host with a systemid available locally, check that result contains failed - { - 'activationkey': 'key', - 'username': 'user', - 'password': 'pass', - 'state': 'absent', - }, - { - 'calls': [ - ('auth.login', ['X' * 43]), - ('system.deleteSystems', xmlrpc_client.Fault(1003, 'The following systems were NOT deleted: 123456789')), - ('auth.logout', [1]), - ], - 'is_registered': True, - 'is_registered.call_count': 1, - 'enable.call_count': 0, - 'systemid.call_count': 1, - 'failed': True, - 'msg': "Failed to unregister: ", - 'run_command.call_count': 0, - 'request_called': True, - 'unlink.call_count': 0, - } - ], -] - - 
-@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') -def test_register_parameters(mocker, capfd, mock_request, patch_rhn, testcase): - # successful execution, no output - mocker.patch.object(basic.AnsibleModule, 'run_command', return_value=(0, '', '')) - mock_is_registered = mocker.patch.object(rhn_register.Rhn, 'is_registered', mocker.PropertyMock(return_value=testcase['is_registered'])) - mocker.patch.object(rhn_register.Rhn, 'enable') - mock_systemid = mocker.patch.object(rhn_register.Rhn, 'systemid', mocker.PropertyMock(return_value=12345)) - mocker.patch('os.unlink', return_value=True) - - with pytest.raises(SystemExit): - rhn_register.main() - - assert basic.AnsibleModule.run_command.call_count == testcase['run_command.call_count'] - if basic.AnsibleModule.run_command.call_count: - assert basic.AnsibleModule.run_command.call_args[0][0][0] == testcase['run_command.call_args'] - - assert mock_is_registered.call_count == testcase['is_registered.call_count'] - assert rhn_register.Rhn.enable.call_count == testcase['enable.call_count'] - assert mock_systemid.call_count == testcase['systemid.call_count'] - assert xmlrpc_client.Transport.request.called == testcase['request_called'] - assert os.unlink.call_count == testcase['unlink.call_count'] - - out, err = capfd.readouterr() - results = json.loads(out) - assert results.get('changed') == testcase.get('changed') - assert results.get('failed') == testcase.get('failed') - assert results['msg'] == testcase['msg'] - assert not testcase['calls'] # all calls should have been consumed From 8df9d0d7dee12391a65b868d390b2dcd3e3803c6 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Thu, 10 Oct 2024 23:02:51 +0300 Subject: [PATCH 286/482] one_host: Fix ID logic (#8907) * Fix one_host module * Add CHANGELOG fragment * PR Fixes * Update exceptions --- 
changelogs/fragments/8907-fix-one-host-id.yml | 2 + plugins/modules/one_host.py | 59 +++++++++++-------- 2 files changed, 38 insertions(+), 23 deletions(-) create mode 100644 changelogs/fragments/8907-fix-one-host-id.yml diff --git a/changelogs/fragments/8907-fix-one-host-id.yml b/changelogs/fragments/8907-fix-one-host-id.yml new file mode 100644 index 0000000000..78fc4080b1 --- /dev/null +++ b/changelogs/fragments/8907-fix-one-host-id.yml @@ -0,0 +1,2 @@ +bugfixes: + - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907). diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py index eea1121733..6188f3d0f7 100644 --- a/plugins/modules/one_host.py +++ b/plugins/modules/one_host.py @@ -152,16 +152,19 @@ class HostModule(OpenNebulaModule): def allocate_host(self): """ Creates a host entry in OpenNebula + self.one.host.allocate returns ID of a host Returns: True on success, fails otherwise. 
""" - if not self.one.host.allocate(self.get_parameter('name'), - self.get_parameter('vmm_mad_name'), - self.get_parameter('im_mad_name'), - self.get_parameter('cluster_id')): - self.fail(msg="could not allocate host") - else: + try: + self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')) self.result['changed'] = True + except Exception as e: + self.fail(msg="Could not allocate host, ERROR: " + str(e)) + return True def wait_for_host_state(self, host, target_states): @@ -221,11 +224,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be put in disabled state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.DISABLED): - self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.DISABLED) result['changed'] = True - else: - self.fail(msg="could not disable host") + except Exception as e: + self.fail(msg="Could not disable host, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) elif current_state in [HOST_STATES.DISABLED]: pass else: @@ -235,11 +240,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be placed in offline state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: - if one.host.status(host.ID, HOST_STATUS.OFFLINE): - self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.OFFLINE) result['changed'] = True - else: - self.fail(msg="could not set host offline") + except Exception as e: + self.fail(msg="Could not set host offline, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) elif current_state in [HOST_STATES.OFFLINE]: pass else: @@ -247,10 +254,12 @@ class 
HostModule(OpenNebulaModule): elif desired_state == 'absent': if current_state != HOST_ABSENT: - if one.host.delete(host.ID): + # returns host ID integer + try: + one.host.delete(host.ID) result['changed'] = True - else: - self.fail(msg="could not delete host from cluster") + except Exception as e: + self.fail(msg="Could not delete host from cluster, ERROR: " + str(e)) # if we reach this point we can assume that the host was taken to the desired state @@ -268,17 +277,21 @@ class HostModule(OpenNebulaModule): if self.requires_template_update(host.TEMPLATE, desired_template_changes): # setup the root element so that pyone will generate XML instead of attribute vector desired_template_changes = {"TEMPLATE": desired_template_changes} - if one.host.update(host.ID, desired_template_changes, 1): # merge the template + # merge the template, returns host ID integer + try: + one.host.update(host.ID, desired_template_changes, 1) result['changed'] = True - else: - self.fail(msg="failed to update the host template") + except Exception as e: + self.fail(msg="Failed to update the host template, ERROR: " + str(e)) # the cluster if host.CLUSTER_ID != self.get_parameter('cluster_id'): - if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): + # returns cluster id in int + try: + one.cluster.addhost(self.get_parameter('cluster_id'), host.ID) result['changed'] = True - else: - self.fail(msg="failed to update the host cluster") + except Exception as e: + self.fail(msg="Failed to update the host cluster, ERROR: " + str(e)) # return self.exit() From 3de46821939bf767ba9fac337e2f72f1b779fe50 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Thu, 10 Oct 2024 23:03:30 +0300 Subject: [PATCH 287/482] krb_ticket: Create module (#8953) * Add kutils module * PR Fixes * PR Fixes 2 * PR Fixes * Fix executables * Fix comment * Fix functions * PR Fix * PR Fix 2 * Fix list name * Fix list name 2 * Rever check_for_none func * Rever 
check_for_none func 2 * Update tests * Update tests 2 * Fix principal * Fix cmdrunner args * Fix multiline * Fix backslash * Fix tests * Fix elif * Fix bool arg * Update doc * Fix doc * Add man reference * Fix doc YAML-quoting * PR Fixes * Fix indent * Fix version_added and name * Fix units name * Fix module name --- .github/BOTMETA.yml | 2 + plugins/modules/krb_ticket.py | 378 ++++++++++++++++++ tests/unit/plugins/modules/test_krb_ticket.py | 14 + .../unit/plugins/modules/test_krb_ticket.yaml | 109 +++++ 4 files changed, 503 insertions(+) create mode 100644 plugins/modules/krb_ticket.py create mode 100644 tests/unit/plugins/modules/test_krb_ticket.py create mode 100644 tests/unit/plugins/modules/test_krb_ticket.yaml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index bcf300025f..fac8adad78 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -830,6 +830,8 @@ files: maintainers: ahussey-redhat $modules/kibana_plugin.py: maintainers: barryib + $modules/krb_ticket.py: + maintainers: abakanovskii $modules/launchd.py: maintainers: martinm82 $modules/layman.py: diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py new file mode 100644 index 0000000000..8894a64ef6 --- /dev/null +++ b/plugins/modules/krb_ticket.py @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: krb_ticket +short_description: Kerberos utils for managing tickets +version_added: 10.0.0 +description: + - Manage Kerberos tickets with C(kinit), C(klist) and C(kdestroy) base utilities. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/user/user_commands/index.html) for reference. 
+author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + password: + description: + - Principal password. + - It is required to specify O(password) or O(keytab_path). + type: str + principal: + description: + - The principal name. + - If not set, the user running this module will be used. + type: str + state: + description: + - The state of the Kerberos ticket. + - V(present) is equivalent of C(kinit) command. + - V(absent) is equivalent of C(kdestroy) command. + type: str + default: present + choices: ["present", "absent"] + kdestroy_all: + description: + - When O(state=absent) destroys all credential caches in collection. + - Equivalent of running C(kdestroy -A). + type: bool + cache_name: + description: + - Use O(cache_name) as the ticket cache name and location. + - If this option is not used, the default cache name and location are used. + - The default credentials cache may vary between systems. + - If not set the the value of E(KRB5CCNAME) environment variable will be used instead, its value is used to name the default ticket cache. + type: str + lifetime: + description: + - Requests a ticket with the lifetime, if the O(lifetime) is not specified, the default ticket lifetime is used. + - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) will not override the configured maximum ticket lifetime. + - "The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d) - days." + - You cannot mix units; a value of V(3h30m) will result in an error. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + start_time: + description: + - Requests a postdated ticket. + - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before use. 
+ - O(start_time) specifies the duration of the delay before the ticket can become valid. + - You can use absolute time formats, for example V(July 27, 2012 at 20:30) you would neet to set O(start_time=20120727203000). + - You can also use time duration format similar to O(lifetime) or O(renewable). + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + renewable: + description: + - Requests renewable tickets, with a total lifetime equal to O(renewable). + - "The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d) - days." + - You cannot mix units; a value of V(3h30m) will result in an error. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + forwardable: + description: + - Request forwardable or non-forwardable tickets. + type: bool + proxiable: + description: + - Request proxiable or non-proxiable tickets. + type: bool + address_restricted: + description: + - Request tickets restricted to the host's local address or non-restricted. + type: bool + anonymous: + description: + - Requests anonymous processing. + type: bool + canonicalization: + description: + - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from the one requested. + type: bool + enterprise: + description: + - Treats the principal name as an enterprise name (implies the O(canonicalization) option). + type: bool + renewal: + description: + - Requests renewal of the ticket-granting ticket. + - Note that an expired ticket cannot be renewed, even if the ticket is still within its renewable life. + type: bool + validate: + description: + - Requests that the ticket-granting ticket in the cache (with the invalid flag set) be passed to the KDC for validation. + - If the ticket is within its requested time range, the cache is replaced with the validated ticket. 
+ type: bool + keytab: + description: + - Requests a ticket, obtained from a key in the local host's keytab. + - If O(keytab_path) is not specified will try to use default client keytab path (C(-i) option). + type: bool + keytab_path: + description: + - Use when O(keytab=true) to specify path to a keytab file. + - It is required to specify O(password) or O(keytab_path). + type: path +requirements: + - krb5-user and krb5-config packages +extends_documentation_fragment: + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: some_password + +- name: Get Kerberos ticket using keytab + community.general.krb_ticket: + keytab: true + keytab_path: /etc/ipa/file.keytab + +- name: Get Kerberos ticket with a lifetime of 7 days + community.general.krb_ticket: + password: some_password + lifetime: 7d + +- name: Get Kerberos ticket with a starting time of July 2, 2024, 1:35:30 p.m. + community.general.krb_ticket: + password: some_password + start_time: "240702133530" + +- name: Get Kerberos ticket using principal name + community.general.krb_ticket: + password: some_password + principal: admin + +- name: Get Kerberos ticket using principal with realm + community.general.krb_ticket: + password: some_password + principal: admin@IPA.TEST + +- name: Check for existence by ticket cache + community.general.krb_ticket: + cache_name: KEYRING:persistent:0:0 + +- name: Make sure default ticket is destroyed + community.general.krb_ticket: + state: absent + +- name: Make sure specific ticket destroyed by principal + community.general.krb_ticket: + state: absent + principal: admin@IPA.TEST + +- name: Make sure specific ticket destroyed by cache_name + community.general.krb_ticket: + state: absent + cache_name: KEYRING:persistent:0:0 + +- name: Make sure all tickets are destroyed + community.general.krb_ticket: + state: absent + kdestroy_all: true +''' + +from ansible.module_utils.basic import 
AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.password = kwargs['password'] + self.principal = kwargs['principal'] + self.state = kwargs['state'] + self.kdestroy_all = kwargs['kdestroy_all'] + self.cache_name = kwargs['cache_name'] + self.start_time = kwargs['start_time'] + self.renewable = kwargs['renewable'] + self.forwardable = kwargs['forwardable'] + self.proxiable = kwargs['proxiable'] + self.address_restricted = kwargs['address_restricted'] + self.canonicalization = kwargs['canonicalization'] + self.enterprise = kwargs['enterprise'] + self.renewal = kwargs['renewal'] + self.validate = kwargs['validate'] + self.keytab = kwargs['keytab'] + self.keytab_path = kwargs['keytab_path'] + + self.kinit = CmdRunner( + module, + command='kinit', + arg_formats=dict( + lifetime=cmd_runner_fmt.as_opt_val('-l'), + start_time=cmd_runner_fmt.as_opt_val('-s'), + renewable=cmd_runner_fmt.as_opt_val('-r'), + forwardable=cmd_runner_fmt.as_bool('-f', '-F', ignore_none=True), + proxiable=cmd_runner_fmt.as_bool('-p', '-P', ignore_none=True), + address_restricted=cmd_runner_fmt.as_bool('-a', '-A', ignore_none=True), + anonymous=cmd_runner_fmt.as_bool('-n'), + canonicalization=cmd_runner_fmt.as_bool('-C'), + enterprise=cmd_runner_fmt.as_bool('-E'), + renewal=cmd_runner_fmt.as_bool('-R'), + validate=cmd_runner_fmt.as_bool('-v'), + keytab=cmd_runner_fmt.as_bool('-k'), + keytab_path=cmd_runner_fmt.as_func(lambda v: ['-t', v] if v else ['-i']), + cache_name=cmd_runner_fmt.as_opt_val('-c'), + principal=cmd_runner_fmt.as_list(), + ) + ) + + self.kdestroy = CmdRunner( + module, + command='kdestroy', + arg_formats=dict( + kdestroy_all=cmd_runner_fmt.as_bool('-A'), + cache_name=cmd_runner_fmt.as_opt_val('-c'), + principal=cmd_runner_fmt.as_opt_val('-p'), + ) + ) + + self.klist = CmdRunner( + module, + 
command='klist', + arg_formats=dict( + show_list=cmd_runner_fmt.as_bool('-l'), + ) + ) + + def exec_kinit(self): + params = dict(self.module.params) + with self.kinit( + "lifetime start_time renewable forwardable proxiable address_restricted anonymous " + "canonicalization enterprise renewal validate keytab keytab_path cache_name principal", + check_rc=True, + data=self.password, + ) as ctx: + rc, out, err = ctx.run(**params) + return out + + def exec_kdestroy(self): + params = dict(self.module.params) + with self.kdestroy( + "kdestroy_all cache_name principal", + check_rc=True + ) as ctx: + rc, out, err = ctx.run(**params) + return out + + def exec_klist(self, show_list): + # Use chech_rc = False because + # If no tickets present, klist command will always return rc = 1 + params = dict(show_list=show_list) + with self.klist( + "show_list", + check_rc=False + ) as ctx: + rc, out, err = ctx.run(**params) + return rc, out, err + + def check_ticket_present(self): + ticket_present = True + show_list = False + + if not self.principal and not self.cache_name: + rc, out, err = self.exec_klist(show_list) + if rc != 0: + ticket_present = False + else: + show_list = True + rc, out, err = self.exec_klist(show_list) + if self.principal and self.principal not in str(out): + ticket_present = False + if self.cache_name and self.cache_name not in str(out): + ticket_present = False + + return ticket_present + + +def main(): + arg_spec = dict( + principal=dict(type='str'), + password=dict(type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent']), + kdestroy_all=dict(type='bool'), + cache_name=dict(type='str', fallback=(env_fallback, ['KRB5CCNAME'])), + lifetime=dict(type='str'), + start_time=dict(type='str'), + renewable=dict(type='str'), + forwardable=dict(type='bool'), + proxiable=dict(type='bool'), + address_restricted=dict(type='bool'), + anonymous=dict(type='bool'), + canonicalization=dict(type='bool'), + enterprise=dict(type='bool'), + 
renewal=dict(type='bool'), + validate=dict(type='bool'), + keytab=dict(type='bool'), + keytab_path=dict(type='path'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_by={ + 'keytab_path': 'keytab' + }, + required_if=[ + ('state', 'present', ('password', 'keytab_path'), True), + ], + ) + + state = module.params['state'] + kdestroy_all = module.params['kdestroy_all'] + + keytab = IPAKeytab(module, + state=state, + kdestroy_all=kdestroy_all, + principal=module.params['principal'], + password=module.params['password'], + cache_name=module.params['cache_name'], + lifetime=module.params['lifetime'], + start_time=module.params['start_time'], + renewable=module.params['renewable'], + forwardable=module.params['forwardable'], + proxiable=module.params['proxiable'], + address_restricted=module.params['address_restricted'], + anonymous=module.params['anonymous'], + canonicalization=module.params['canonicalization'], + enterprise=module.params['enterprise'], + renewal=module.params['renewal'], + validate=module.params['validate'], + keytab=module.params['keytab'], + keytab_path=module.params['keytab_path'], + ) + + if module.params['keytab_path'] is not None and module.params['keytab'] is not True: + module.fail_json(msg="If keytab_path is specified then keytab parameter must be True") + + changed = False + if state == 'present': + if not keytab.check_ticket_present(): + changed = True + if not module.check_mode: + keytab.exec_kinit() + + if state == 'absent': + if kdestroy_all: + changed = True + if not module.check_mode: + keytab.exec_kdestroy() + elif keytab.check_ticket_present(): + changed = True + if not module.check_mode: + keytab.exec_kdestroy() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_krb_ticket.py b/tests/unit/plugins/modules/test_krb_ticket.py new file mode 100644 index 0000000000..8c17e2e43b --- /dev/null +++ 
b/tests/unit/plugins/modules/test_krb_ticket.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import krb_ticket +from .helper import Helper, RunCommandMock # pylint: disable=unused-import + + +Helper.from_module(krb_ticket, __name__) diff --git a/tests/unit/plugins/modules/test_krb_ticket.yaml b/tests/unit/plugins/modules/test_krb_ticket.yaml new file mode 100644 index 0000000000..9882bf137d --- /dev/null +++ b/tests/unit/plugins/modules/test_krb_ticket.yaml @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: test_kinit_default + input: + state: present + password: cool_password + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} + rc: 1 + out: "" + err: "" + - command: [/testbin/kinit] + environ: &env-data {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true, data: cool_password} + rc: 0 + out: "" + err: "" +- id: test_kinit_principal + input: + state: present + password: cool_password + principal: admin@IPA.TEST + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/kinit, admin@IPA.TEST] + environ: *env-data + rc: 0 + out: "" + err: "" +- id: test_kdestroy_default + input: + state: absent + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist] + environ: 
*env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/kdestroy] + environ: &env-norc {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "" + err: "" +- id: test_kdestroy_principal + input: + state: absent + principal: admin@IPA.TEST + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: "admin@IPA.TEST" + err: "" + - command: [/testbin/kdestroy, -p, admin@IPA.TEST] + environ: *env-norc + rc: 0 + out: "" + err: "" +- id: test_kdestroy_cache_name + input: + state: absent + cache_name: KEYRING:persistent:0:0 + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: "KEYRING:persistent:0:0" + err: "" + - command: [/testbin/kdestroy, -c, KEYRING:persistent:0:0] + environ: *env-norc + rc: 0 + out: "" + err: "" +- id: test_kdestroy_all + input: + state: absent + kdestroy_all: true + output: + changed: true + mocks: + run_command: + - command: [/testbin/kdestroy, -A] + environ: *env-norc + rc: 0 + out: "" + err: "" From 67ddb567c90f6a03dd3a9a3e2c4873c415b07e0d Mon Sep 17 00:00:00 2001 From: Ruben Bosch <8641284+Rubueno@users.noreply.github.com> Date: Thu, 10 Oct 2024 22:04:04 +0200 Subject: [PATCH 288/482] Dell PwrButton requires a job initated at reboot (#9012) Dell systems do not change the bios setting PwrButton right away. The command will return changed=true, but it is not applied. Also no job is scheduled at next reboot for the change to take place. This patch aims to fix this issue. 
--- ...ll-pwrbutton-requires-a-job-initiated-at-reboot.yml | 4 ++++ plugins/module_utils/redfish_utils.py | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml diff --git a/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml b/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml new file mode 100644 index 0000000000..131ee68c7c --- /dev/null +++ b/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml @@ -0,0 +1,4 @@ +minor_changes: + - redfish_utils module utils - schedule a BIOS configuration job at next + reboot when the BIOS config is changed + (https://github.com/ansible-collections/community.general/pull/9012). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 28d6f2ef0c..2ef928e510 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -2311,11 +2311,19 @@ class RedfishUtils(object): # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} + + # WORKAROUND + # Dell systems require manually setting the apply time to "OnReset" + # to spawn a proprietary job to apply the BIOS settings + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'msg': "Modified BIOS attributes %s. 
A reboot is required" % (attrs_to_patch), 'warning': warning} def set_boot_order(self, boot_list): From 410288401b693da194cae2102df223e8b2fb60c4 Mon Sep 17 00:00:00 2001 From: TobiasZeuch181 Date: Sat, 12 Oct 2024 10:05:34 +0200 Subject: [PATCH 289/482] Add zypper_repository_info module (#8778) * Adding the list-option for reading the registered repodata * adding a parameter list * if the parameter is set, return the output (as a dictionary) * adjusting the documentation for the new parameter * Adding changelot-fragment and link to the issue in the issue-tracker https://github.com/ansible-collections/community.general/issues/8777 * Adding description and example for the new list parameter * Adding type for new parameter list to argument_spec * Revert "Adding type for new parameter list to argument_spec" This reverts commit 6b86e8ba447a9b53e434c79d33c7fe260485b342. * Revert "Adding description and example for the new list parameter" This reverts commit db06dafb958f6aeffa38adeee85623904b26bbb1. 
* Adding a new module for reading the list of regiestered repositories I created a new module for reading the repositories based on a sub-procedure from the existing zypper_repository * Removing the changes to module zypper_repository because the list-option is not supposed to go into this module * removing the last change from the zypper_repository module * Fixing linter errors Adding extra lines before definitions adding the missing dash in the documentation removing the unused imports * Adding maintainer under BOTMETA, adding test and fixing suggestions I added the maintainer to BOTMETA I applied the suggestions on the code form the review I added a test for the new module to the existing zypper_repository-tests * Adding maintainer under BOTMETA, adding test and fixing suggestions I added the maintainer to BOTMETA I applied the suggestions on the code form the review I added a test for the new module to the existing zypper_repository-tests * Deleting the fragment because this will be created automatically I deleted the fragment because this will be created automatically based on the short_description and version_added * removing foreign commits from history that accidentily sneaked in with the last rebase * Update plugins/modules/zypper_repository_info.py Co-authored-by: Felix Fontein * Update .github/BOTMETA.yml Co-authored-by: Felix Fontein * Update plugins/modules/zypper_repository_info.py Co-authored-by: Felix Fontein * Update plugins/modules/zypper_repository_info.py Co-authored-by: Felix Fontein * Update plugins/modules/zypper_repository_info.py Co-authored-by: Felix Fontein * Update plugins/modules/zypper_repository_info.py Co-authored-by: Felix Fontein * removing irrelevant return fields from the sample * Quoting the line with the colon * fixing syntax errors in sam * removing duplicate Note section * Removing newline on sample-code * Update version_added to 9.5.0 Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update 
description (notespelling) Co-authored-by: Felix Fontein * Update .github/BOTMETA.yml Co-authored-by: Felix Fontein * Update descriptino (notespelling URL) Co-authored-by: Felix Fontein * use module_utils.deps to include the parseString/parseXML module * clean up module and botmeta cleanup: * remove duplicate entry from botmeta * move imports below DOCUMENTATION * remove unused imports * add required key 'returned' * moving import and fixing syntax In this documentation example the return type is dict instead of dictionary: https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_documenting.html#return-block Also the from __future__ import should be at the beginning of the file, according to lint * Enabling check-mode because the module doesn't change anything anyways * indicate empty dictionary Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update version_added Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .github/BOTMETA.yml | 3 + plugins/modules/zypper_repository_info.py | 137 ++++++++++++++++++ .../targets/zypper_repository/tasks/test.yml | 8 + .../tasks/zypper_repository_info.yml | 26 ++++ 4 files changed, 174 insertions(+) create mode 100644 plugins/modules/zypper_repository_info.py create mode 100644 tests/integration/targets/zypper_repository/tasks/zypper_repository_info.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fac8adad78..c49080bc94 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1441,6 +1441,9 @@ files: maintainers: $team_suse $plugin_utils/ansible_type.py: maintainers: vbotka + $modules/zypper_repository_info.py: + labels: zypper + maintainers: $team_suse TobiasZeuch181 $plugin_utils/keys_filter.py: maintainers: vbotka $plugin_utils/unsafe.py: diff --git a/plugins/modules/zypper_repository_info.py 
b/plugins/modules/zypper_repository_info.py new file mode 100644 index 0000000000..dab4b9bbe5 --- /dev/null +++ b/plugins/modules/zypper_repository_info.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Tobias Zeuch +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: zypper_repository_info +author: "Tobias Zeuch (@TobiasZeuch181)" +version_added: 10.0.0 +short_description: List Zypper repositories +description: + - List Zypper repositories on SUSE and openSUSE. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + +requirements: + - "zypper >= 1.0 (included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0)" + - python-xml +notes: + - "For info about packages, use the module M(ansible.builtin.package_facts)." +''' + +EXAMPLES = ''' +- name: List registered repositories and store in variable repositories + community.general.zypper_repository_info: {} + register: repodatalist +''' + +RETURN = ''' +repodatalist: + description: + - A list of repository descriptions like it is returned by the command C(zypper repos). + type: list + returned: always + elements: dict + contains: + alias: + description: The alias of the repository. + type: str + autorefresh: + description: Indicates, if autorefresh is enabled on the repository. + type: int + enabled: + description: indicates, if the repository is enabled + type: int + gpgcheck: + description: indicates, if the GPG signature of the repository meta data is checked + type: int + name: + description: the name of the repository + type: str + priority: + description: the priority of the repository + type: int + url: + description: The URL of the repository on the internet. 
+ type: str + sample: [ + { + "alias": "SLE-Product-SLES", + "autorefresh": "1", + "enabled": "1", + "gpgcheck": "1", + "name": "SLE-Product-SLES", + "priority": "99", + "url": "http://repo:50000/repo/SUSE/Products/SLE-Product-SLES/15-SP2/x86_64/product" + } + ] +''' + + +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("parseXML"): + from xml.dom.minidom import parseString as parseXML + +from ansible.module_utils.basic import AnsibleModule + +REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] + + +def _get_cmd(module, *args): + """Combines the non-interactive zypper command with arguments/subcommands""" + cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive'] + cmd.extend(args) + + return cmd + + +def _parse_repos(module): + """parses the output of zypper --xmlout repos and return a parse repo dictionary""" + cmd = _get_cmd(module, '--xmlout', 'repos') + + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + repos = [] + dom = parseXML(stdout) + repo_list = dom.getElementsByTagName('repo') + for repo in repo_list: + opts = {} + for o in REPO_OPTS: + opts[o] = repo.getAttribute(o) + opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + # A repo can be uniquely identified by an alias + url + repos.append(opts) + return repos + # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined) + elif rc == 6: + return [] + else: + module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ), + supports_check_mode=True + ) + + deps.validate(parseXML) + + repodatalist = _parse_repos(module) + module.exit_json(changed=False, repodatalist=repodatalist) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/zypper_repository/tasks/test.yml b/tests/integration/targets/zypper_repository/tasks/test.yml 
index 739b4c2642..e81c3ceb1d 100644 --- a/tests/integration/targets/zypper_repository/tasks/test.yml +++ b/tests/integration/targets/zypper_repository/tasks/test.yml @@ -38,3 +38,11 @@ - name: ensure zypper ref still works command: zypper -n ref + +- block: + - include_tasks: 'zypper_repository.yml' + always: + - name: remove repositories added during test + community.general.zypper_repository: + name: "test" + state: absent \ No newline at end of file diff --git a/tests/integration/targets/zypper_repository/tasks/zypper_repository_info.yml b/tests/integration/targets/zypper_repository/tasks/zypper_repository_info.yml new file mode 100644 index 0000000000..2b96056e3a --- /dev/null +++ b/tests/integration/targets/zypper_repository/tasks/zypper_repository_info.yml @@ -0,0 +1,26 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Add test repo + community.general.zypper_repository: + name: test + state: present + repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64 + register: zypper_result + +- name: read repositories with zypper_repository_info + community.general.zypper_repository_info: + register: repositories + +- name: verify, that test-repo is returned by the repodatalist + assert: + that: + - "{{ 'test' in repositories.repodatalist|map(attribute='name') | list }}" + +- name: Cleanup - Delete test repo + community.general.zypper_repository: + name: test + state: absent + register: zypper_result From a894f8e7ebc28bab587879d98edb0ae704faf837 Mon Sep 17 00:00:00 2001 From: Lincoln Wallace Date: Sun, 13 Oct 2024 07:08:11 -0300 Subject: [PATCH 290/482] snap: improve documentation (#8972) * plugins/modules/snap: improve documentation Signed-off-by: Lincoln Wallace * undo helper setence about finding avaible snaps. 
Co-authored-by: Farshid Tavakolizadeh * wip: adress reviews Signed-off-by: Lincoln Wallace * fix: revert sentence Signed-off-by: Lincoln Wallace * feat: improve explanation on snap options Co-authored-by: Farshid Tavakolizadeh * clean: remove duplicated and leave reference Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * feat: add note about priviledge scalation and switch sentence position Signed-off-by: Lincoln Wallace * fix: remove additional dash. Co-authored-by: Farshid Tavakolizadeh * feat: reword note and use better doc cross-ref syntax Signed-off-by: Lincoln Wallace * refact: add period. Co-authored-by: Farshid Tavakolizadeh * fix: linter errors Signed-off-by: Lincoln Wallace * fix: remove redundant sentence Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * fix: remove confuse sentence Co-authored-by: Farshid Tavakolizadeh * fix: remove redudant content Signed-off-by: Lincoln Wallace * feat: add missing word Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * refact: remove abreviation Co-authored-by: Felix Fontein * refact: remove abreviation Co-authored-by: Felix Fontein * refact: remove abreviation Co-authored-by: Felix Fontein --------- Signed-off-by: Lincoln Wallace Co-authored-by: Farshid Tavakolizadeh Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- plugins/modules/snap.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py index 16c3aec48b..15637f3315 100644 --- a/plugins/modules/snap.py +++ b/plugins/modules/snap.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright (c) 2024, Lincoln Wallace (locnnil) # Copyright (c) 2021, Alexei Znamensky (russoz) # Copyright (c) 2021, Marcus Rickert # Copyright (c) 2018, Stanislas Lange (angristan) @@ -31,8 +32,7 @@ options: - Name of the snaps to be installed. 
- Any named snap accepted by the C(snap) command is valid. - > - Notice that snap files might require O(dangerous=true) to ignore the error - "cannot find signatures with metadata for snap". + O(dangerous=true) may be necessary when installing `.snap` files. See O(dangerous) for more details. required: true type: list elements: str @@ -47,10 +47,13 @@ options: type: str classic: description: - - Confinement policy. The classic confinement allows a snap to have - the same level of access to the system as "classic" packages, - like those managed by APT. This option corresponds to the C(--classic) argument. - This option can only be specified if there is a single snap in the task. + - Install a snap that has classic confinement. + - This option corresponds to the C(--classic) argument of the C(snap install) command. + - This level of confinement is permissive, granting full system access, + similar to that of traditionally packaged applications that do not use sandboxing mechanisms. + This option can only be specified when the task involves a single snap. + - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. + type: bool required: false default: false @@ -69,18 +72,27 @@ options: - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will only be applied to active snaps. + - Options will only be applied when C(state) is set to V(present). + This is done after the necessary installation + or refresh (upgrade/downgrade) of all the snaps listed in O(name). + - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. 
+ required: false type: list elements: str version_added: 4.4.0 dangerous: description: - - Install the given snap file even if there are no pre-acknowledged signatures for it, - meaning it was not verified and could be dangerous. + - Install the snap in dangerous mode, without validating its assertions and signatures. + - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. + - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. type: bool required: false default: false version_added: 7.2.0 +notes: + - Privileged operations, such as installing and configuring snaps, require root priviledges. + This is only the case if the user has not logged in to the Snap Store. author: - Victor Carceler (@vcarceler) From 12fa2452d8f6f38e4601c0b9fc30f489f971907f Mon Sep 17 00:00:00 2001 From: Gabriele Pongelli Date: Thu, 17 Oct 2024 21:11:29 +0200 Subject: [PATCH 291/482] update gitlab label's color (#9010) * update gitlab label's color fail if both new_name and color are missing, as per Gitlab API docs. * add changelog * Update changelog with suggestion Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * remove unneeded check * Update changelog --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/9010-edit-gitlab-label-color.yaml | 2 ++ plugins/modules/gitlab_label.py | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 changelogs/fragments/9010-edit-gitlab-label-color.yaml diff --git a/changelogs/fragments/9010-edit-gitlab-label-color.yaml b/changelogs/fragments/9010-edit-gitlab-label-color.yaml new file mode 100644 index 0000000000..0959e57772 --- /dev/null +++ b/changelogs/fragments/9010-edit-gitlab-label-color.yaml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010). 
diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py index 635033ab6c..f6e9172eb3 100644 --- a/plugins/modules/gitlab_label.py +++ b/plugins/modules/gitlab_label.py @@ -275,6 +275,8 @@ class GitlabLabels(object): _label.description = var_obj.get('description') if var_obj.get('priority') is not None: _label.priority = var_obj.get('priority') + if var_obj.get('color') is not None: + _label.color = var_obj.get('color') # save returns None _label.save() From 5874711c6e2c6733dc35403df5951e97d65af7ac Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 19 Oct 2024 12:34:55 +0200 Subject: [PATCH 292/482] Fix reuse workflow branches. --- .github/workflows/reuse.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml index 31afe207c5..e5195f6dcf 100644 --- a/.github/workflows/reuse.yml +++ b/.github/workflows/reuse.yml @@ -7,10 +7,14 @@ name: Verify REUSE on: push: - branches: [main] - pull_request_target: + branches: + - main + - stable-* + pull_request: types: [opened, synchronize, reopened] - branches: [main] + branches: + - main + - stable-* # Run CI once per day (at 07:30 UTC) schedule: - cron: '30 7 * * *' From 26fe42776c4205e9cd5d701f93320b9b2981cbe9 Mon Sep 17 00:00:00 2001 From: raspbeguy Date: Sat, 19 Oct 2024 12:49:18 +0200 Subject: [PATCH 293/482] consul_kv: add argument for the datacenter option on consul api (#9026) * consul_kv: add argument for the datacenter option on consul api * changelog: add fragment for #9026 * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/9026-consul_kv-datacenter.yml | 2 ++ plugins/modules/consul_kv.py | 10 +++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9026-consul_kv-datacenter.yml diff --git a/changelogs/fragments/9026-consul_kv-datacenter.yml 
b/changelogs/fragments/9026-consul_kv-datacenter.yml new file mode 100644 index 0000000000..73ddd69266 --- /dev/null +++ b/changelogs/fragments/9026-consul_kv-datacenter.yml @@ -0,0 +1,2 @@ +minor_changes: + - consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026). diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py index 84169fc6b7..fd3a1fce06 100644 --- a/plugins/modules/consul_kv.py +++ b/plugins/modules/consul_kv.py @@ -111,6 +111,12 @@ options: - Whether to verify the tls certificate of the consul agent. type: bool default: true + datacenter: + description: + - The name of the datacenter to query. If unspecified, the query will default + to the datacenter of the Consul agent on O(host). + type: str + version_added: 10.0.0 ''' @@ -291,7 +297,8 @@ def get_consul_api(module): port=module.params.get('port'), scheme=module.params.get('scheme'), verify=module.params.get('validate_certs'), - token=module.params.get('token')) + token=module.params.get('token'), + dc=module.params.get('datacenter')) def test_dependencies(module): @@ -305,6 +312,7 @@ def main(): module = AnsibleModule( argument_spec=dict( cas=dict(type='str'), + datacenter=dict(type='str', default=None), flags=dict(type='str'), key=dict(type='str', required=True, no_log=False), host=dict(type='str', default='localhost'), From 1180843e35f0076104f532cfc5396585279a343b Mon Sep 17 00:00:00 2001 From: Zac Date: Sat, 19 Oct 2024 05:49:47 -0500 Subject: [PATCH 294/482] bitwarden_secrets_manager lookup plugin: support more current versions of BWS CLI (#9028) * add support for getting secrets in the current version of bitwarden secrets manager * format * format2 * fragment * fix formatting errors * strip out junk before the version in cli output * mock the --version command in the unit tests * use LooseVersion comparison - russoz suggestion * add blank line --- ...8-bitwarden-secrets-manager-syntax-fix.yml | 2 ++ 
plugins/lookup/bitwarden_secrets_manager.py | 23 +++++++++++++++++-- .../lookup/test_bitwarden_secrets_manager.py | 4 ++++ 3 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml diff --git a/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml b/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml new file mode 100644 index 0000000000..d542692f45 --- /dev/null +++ b/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028)." \ No newline at end of file diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py index 8cabc693ff..3d08067105 100644 --- a/plugins/lookup/bitwarden_secrets_manager.py +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -77,6 +77,8 @@ from ansible.module_utils.common.text.converters import to_text from ansible.parsing.ajson import AnsibleJSONDecoder from ansible.plugins.lookup import LookupBase +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + class BitwardenSecretsManagerException(AnsibleLookupError): pass @@ -114,6 +116,15 @@ class BitwardenSecretsManager(object): rc = p.wait() return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + def get_bws_version(self): + """Get the version of the Bitwarden Secrets Manager CLI. + """ + out, err, rc = self._run(['--version']) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + # strip the prefix and grab the last segment, the version number + return out.split()[-1] + def get_secret(self, secret_id, bws_access_token): """Get and return the secret with the given secret_id. 
""" @@ -122,10 +133,18 @@ class BitwardenSecretsManager(object): # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. params = [ '--color', 'no', - '--access-token', bws_access_token, - 'get', 'secret', secret_id + '--access-token', bws_access_token ] + # bws version 0.3.0 introduced a breaking change in the command line syntax: + # pre-0.3.0: verb noun + # 0.3.0 and later: noun verb + bws_version = self.get_bws_version() + if LooseVersion(bws_version) < LooseVersion('0.3.0'): + params.extend(['get', 'secret', secret_id]) + else: + params.extend(['secret', 'get', secret_id]) + out, err, rc = self._run_with_retry(params) if rc != 0: raise BitwardenSecretsManagerException(to_text(err)) diff --git a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py index aaeaf79eaf..5d2abeffa8 100644 --- a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py +++ b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py @@ -45,6 +45,10 @@ MOCK_SECRETS = [ class MockBitwardenSecretsManager(BitwardenSecretsManager): def _run(self, args, stdin=None): + # mock the --version call + if args[0] == "--version": + return "bws 1.0.0", "", 0 + # secret_id is the last argument passed to the bws CLI secret_id = args[-1] rc = 1 From 86166ccade8332b219e4f09de34781ec424917b5 Mon Sep 17 00:00:00 2001 From: Thibaut Decombe <68703331+UnknownPlatypus@users.noreply.github.com> Date: Sat, 19 Oct 2024 12:50:46 +0200 Subject: [PATCH 295/482] Speed up brew module package install & upgrade (#9022) * Verify installation via `brew install` return code in`_install_current_package` (Skip one brew info) * Avoid computing `current_package_is_installed` twice in a row * Verify installation via `brew install` return code in `_upgrade_current_package(Skip 2 brew commands) * Add changelog fragment * Update changelogs/fragments/9022-improve-homebrew-perf.yml Co-authored-by: Felix Fontein 
--------- Co-authored-by: Felix Fontein --- changelogs/fragments/9022-improve-homebrew-perf.yml | 2 ++ plugins/modules/homebrew.py | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/9022-improve-homebrew-perf.yml diff --git a/changelogs/fragments/9022-improve-homebrew-perf.yml b/changelogs/fragments/9022-improve-homebrew-perf.yml new file mode 100644 index 0000000000..077b5caefc --- /dev/null +++ b/changelogs/fragments/9022-improve-homebrew-perf.yml @@ -0,0 +1,2 @@ +minor_changes: + - homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022). diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 2b60846b43..58b13f83d4 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -572,7 +572,7 @@ class Homebrew(object): cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if self._current_package_is_installed(): + if rc == 0: self.changed_count += 1 self.changed_pkgs.append(self.current_package) self.changed = True @@ -600,10 +600,11 @@ class Homebrew(object): self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) - if not self._current_package_is_installed(): + current_package_is_installed = self._current_package_is_installed() + if not current_package_is_installed: command = 'install' - if self._current_package_is_installed() and not self._current_package_is_outdated(): + if current_package_is_installed and not self._current_package_is_outdated(): self.message = 'Package is already upgraded: {0}'.format( self.current_package, ) @@ -626,7 +627,7 @@ class Homebrew(object): cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if self._current_package_is_installed() and not self._current_package_is_outdated(): + if rc == 0: self.changed_count += 1 self.changed_pkgs.append(self.current_package) self.changed = True From 
064f76c27bcbb7236de1f8bdfd884a5d6ff12e38 Mon Sep 17 00:00:00 2001 From: Connor Newton Date: Sat, 19 Oct 2024 11:51:50 +0100 Subject: [PATCH 296/482] New module: Jenkins node management (#9016) * New module: Jenkins node management * Fix sanity errors * (Try to) fix mock import for Python 2.7 * Remove encoding from XML tostring in hopes of appeasing py27 Default encoding is probably always good enough. * Turns out that encoding parameter was important for python3... It's not super obvious how to resolve in a 2 + 3 compatible way, so branch and omit encoding for 2. * Implement review fixes and suggestions --- .github/BOTMETA.yml | 2 + plugins/modules/jenkins_node.py | 385 ++++++++++++ .../unit/plugins/modules/test_jenkins_node.py | 575 ++++++++++++++++++ tests/unit/requirements.txt | 3 + 4 files changed, 965 insertions(+) create mode 100644 plugins/modules/jenkins_node.py create mode 100644 tests/unit/plugins/modules/test_jenkins_node.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c49080bc94..935fe11664 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -762,6 +762,8 @@ files: maintainers: sermilrod $modules/jenkins_job_info.py: maintainers: stpierre + $modules/jenkins_node.py: + maintainers: phyrwork $modules/jenkins_plugin.py: maintainers: jtyr $modules/jenkins_script.py: diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py new file mode 100644 index 0000000000..2ee4a481a5 --- /dev/null +++ b/plugins/modules/jenkins_node.py @@ -0,0 +1,385 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: jenkins_node +short_description: Manage Jenkins nodes +version_added: 10.0.0 +description: + - Manage Jenkins 
nodes with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Connor Newton (@phyrwork) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - Check mode is unable to show configuration changes for a node that is not yet + present. + diff_mode: + support: none +options: + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + name: + description: + - Name of the Jenkins node to manage. + required: true + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str + token: + description: + - API token to authenticate with the Jenkins server. + type: str + state: + description: + - Specifies whether the Jenkins node should be V(present) (created), V(absent) + (deleted), V(enabled) (online) or V(disabled) (offline). + default: present + choices: ['enabled', 'disabled', 'present', 'absent'] + type: str + num_executors: + description: + - When specified, sets the Jenkins node executor count. + type: int + labels: + description: + - When specified, sets the Jenkins node labels. + type: list + elements: str +''' + +EXAMPLES = ''' +- name: Create a Jenkins node using token authentication + community.general.jenkins_node: + url: http://localhost:8080 + user: jenkins + token: 11eb751baabb66c4d1cb8dc4e0fb142cde + name: my-node + state: present + +- name: Set number of executors on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + num_executors: 4 + +- name: Set labels on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + labels: + - label-1 + - label-2 + - label-3 +''' + +RETURN = ''' +--- +url: + description: URL used to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +user: + description: User used for authentication. 
+ returned: success + type: str + sample: jenkins +name: + description: Name of the Jenkins node. + returned: success + type: str + sample: my-node +state: + description: State of the Jenkins node. + returned: success + type: str + sample: present +created: + description: Whether or not the Jenkins node was created by the task. + returned: success + type: bool +deleted: + description: Whether or not the Jenkins node was deleted by the task. + returned: success + type: bool +disabled: + description: Whether or not the Jenkins node was disabled by the task. + returned: success + type: bool +enabled: + description: Whether or not the Jenkins node was enabled by the task. + returned: success + type: bool +configured: + description: Whether or not the Jenkins node was configured by the task. + returned: success + type: bool +''' + +import sys +from xml.etree import ElementTree + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare( + "python-jenkins", + reason="python-jenkins is required to interact with Jenkins", + url="https://opendev.org/jjb/python-jenkins", +): + import jenkins + + +IS_PYTHON_2 = sys.version_info[0] <= 2 + + +class JenkinsNode: + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.state = module.params['state'] + self.token = module.params['token'] + self.user = module.params['user'] + self.url = module.params['url'] + self.num_executors = module.params['num_executors'] + self.labels = module.params['labels'] + + if self.labels is not None: + for label in self.labels: + if " " in label: + self.module.fail_json("labels must not contain spaces: got invalid label {}".format(label)) + + self.instance = self.get_jenkins_instance() + self.result = { + 'changed': False, + 'url': self.url, + 'user': self.user, + 'name': self.name, + 'state': 
self.state, + 'created': False, + 'deleted': False, + 'disabled': False, + 'enabled': False, + 'configured': False, + 'warnings': [], + } + + def get_jenkins_instance(self): + try: + if self.user and self.token: + return jenkins.Jenkins(self.url, self.user, self.token) + elif self.user and not self.token: + return jenkins.Jenkins(self.url, self.user) + else: + return jenkins.Jenkins(self.url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def configure_node(self, present): + if not present: + # Node would only not be present if in check mode and if not present there + # is no way to know what would and would not be changed. + if not self.module.check_mode: + raise Exception("configure_node present is False outside of check mode") + return + + configured = False + + data = self.instance.get_node_config(self.name) + root = ElementTree.fromstring(data) + + if self.num_executors is not None: + elem = root.find('numExecutors') + if elem is None: + elem = ElementTree.SubElement(root, 'numExecutors') + if elem.text is None or int(elem.text) != self.num_executors: + elem.text = str(self.num_executors) + configured = True + + if self.labels is not None: + elem = root.find('label') + if elem is None: + elem = ElementTree.SubElement(root, 'label') + labels = [] + if elem.text: + labels = elem.text.split() + if labels != self.labels: + elem.text = " ".join(self.labels) + configured = True + + if configured: + if IS_PYTHON_2: + data = ElementTree.tostring(root) + else: + data = ElementTree.tostring(root, encoding="unicode") + + self.instance.reconfig_node(self.name, data) + + self.result['configured'] = configured + if configured: + self.result['changed'] = True + + def present_node(self): + def create_node(): + try: + self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling 
redirects returned when posting new resources. If the node is + # created OK then can ignore the error. + if not self.instance.node_exists(self.name): + raise e + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node created: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + present = self.instance.node_exists(self.name) + created = False + if not present: + if not self.module.check_mode: + create_node() + present = True + + created = True + + self.configure_node(present) + + self.result['created'] = created + if created: + self.result['changed'] = True + + return present # Used to gate downstream queries when in check mode. + + def absent_node(self): + def delete_node(): + try: + self.instance.delete_node(self.name) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting new resources. If the node is + # deleted OK then can ignore the error. + if self.instance.node_exists(self.name): + raise e + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node deleted: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + present = self.instance.node_exists(self.name) + deleted = False + if present: + if not self.module.check_mode: + delete_node() + + deleted = True + + self.result['deleted'] = deleted + if deleted: + self.result['changed'] = True + + def enabled_node(self): + present = self.present_node() + + enabled = False + + if present: + info = self.instance.get_node_info(self.name) + + if info['offline']: + if not self.module.check_mode: + self.instance.enable_node(self.name) + + enabled = True + else: + # Would have created node with initial state enabled therefore would not have + # needed to enable therefore not enabled. 
+ if not self.module.check_mode: + raise Exception("enabled_node present is False outside of check mode") + enabled = False + + self.result['enabled'] = enabled + if enabled: + self.result['changed'] = True + + def disabled_node(self): + present = self.present_node() + + disabled = False + + if present: + info = self.instance.get_node_info(self.name) + + if not info['offline']: + if not self.module.check_mode: + self.instance.disable_node(self.name) + + disabled = True + else: + # Would have created node with initial state enabled therefore would have + # needed to disable therefore disabled. + if not self.module.check_mode: + raise Exception("disabled_node present is False outside of check mode") + disabled = True + + self.result['disabled'] = disabled + if disabled: + self.result['changed'] = True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str'), + url=dict(default='http://localhost:8080'), + user=dict(), + token=dict(no_log=True), + state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'), + num_executors=dict(type='int'), + labels=dict(type='list', elements='str'), + ), + supports_check_mode=True, + ) + + deps.validate(module) + + jenkins_node = JenkinsNode(module) + + state = module.params.get('state') + if state == 'enabled': + jenkins_node.enabled_node() + elif state == 'disabled': + jenkins_node.disabled_node() + elif state == 'present': + jenkins_node.present_node() + else: + jenkins_node.absent_node() + + module.exit_json(**jenkins_node.result) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_jenkins_node.py b/tests/unit/plugins/modules/test_jenkins_node.py new file mode 100644 index 0000000000..33e7ca0f13 --- /dev/null +++ b/tests/unit/plugins/modules/test_jenkins_node.py @@ -0,0 +1,575 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 
+# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import jenkins +import json + +from xml.etree import ElementTree as et + +import pytest + +from ansible.module_utils import basic +from ansible.module_utils.common.text.converters import to_bytes +from ansible_collections.community.general.tests.unit.compat.mock import patch, call +from ansible_collections.community.general.plugins.modules import jenkins_node +from pytest import fixture, raises, mark, param + + +def xml_equal(x, y): + # type: (et.Element | str, et.Element | str) -> bool + if isinstance(x, str): + x = et.fromstring(x) + + if isinstance(y, str): + y = et.fromstring(y) + + if x.tag != y.tag: + return False + + if x.attrib != y.attrib: + return False + + if (x.text or "").strip() != (y.text or "").strip(): + return False + + x_children = list(x) + y_children = list(y) + + if len(x_children) != len(y_children): + return False + + for x, y in zip(x_children, y_children): + if not xml_equal(x, y): + return False + + return True + + +def assert_xml_equal(x, y): + if xml_equal(x, y): + return True + + if not isinstance(x, str): + x = et.tostring(x) + + if not isinstance(y, str): + y = et.tostring(y) + + raise AssertionError("{} != {}".format(x, y)) + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + def __init__(self, value): + self.value = value + + def __getitem__(self, item): + return self.value[item] + + +def exit_json(*args, **kwargs): + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +class AnsibleFailJson(Exception): + pass + + +def fail_json(*args, **kwargs): + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +@fixture(autouse=True) +def module(): + with patch.multiple( + 
"ansible.module_utils.basic.AnsibleModule", + exit_json=exit_json, + fail_json=fail_json, + ): + yield + + +@fixture +def instance(): + with patch("jenkins.Jenkins", autospec=True) as instance: + yield instance + + +@fixture +def get_instance(instance): + with patch( + "ansible_collections.community.general.plugins.modules.jenkins_node.JenkinsNode.get_jenkins_instance", + autospec=True, + ) as mock: + mock.return_value = instance + yield mock + + +def test_get_jenkins_instance_with_user_and_token(instance): + instance.node_exists.return_value = False + + set_module_args({ + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + "user": "admin", + "token": "password", + }) + + with pytest.raises(AnsibleExitJson): + jenkins_node.main() + + assert instance.call_args == call("https://localhost:8080", "admin", "password") + + +def test_get_jenkins_instance_with_user(instance): + instance.node_exists.return_value = False + + set_module_args({ + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + "user": "admin", + }) + + with pytest.raises(AnsibleExitJson): + jenkins_node.main() + + assert instance.call_args == call("https://localhost:8080", "admin") + + +def test_get_jenkins_instance_with_no_credential(instance): + instance.node_exists.return_value = False + + set_module_args({ + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + }) + + with pytest.raises(AnsibleExitJson): + jenkins_node.main() + + assert instance.call_args == call("https://localhost:8080") + + +PRESENT_STATES = ["present", "enabled", "disabled"] + + +@mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) +def test_state_present_when_absent(get_instance, instance, state): + instance.node_exists.return_value = False + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": state, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert 
instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) + + assert result.value["created"] is True + assert result.value["changed"] is True + + +@mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) +def test_state_present_when_absent_check_mode(get_instance, instance, state): + instance.node_exists.return_value = False + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": state, + "_ansible_check_mode": True, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.create_node.called + + assert result.value["created"] is True + assert result.value["changed"] is True + + +def test_state_present_when_present(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "present", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.create_node.called + + assert result.value["created"] is False + assert result.value["changed"] is False + + +def test_state_absent_when_present(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "absent", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.delete_node.call_args == call("my-node") + + assert result.value["deleted"] is True + assert result.value["changed"] is True + + +def test_state_absent_when_present_check_mode(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "absent", + "_ansible_check_mode": True, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.delete_node.called + + assert result.value["deleted"] is True + assert 
result.value["changed"] is True + + +def test_state_absent_when_absent(get_instance, instance): + instance.node_exists.return_value = False + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "absent", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.delete_node.called + + assert result.value["deleted"] is False + assert result.value["changed"] is False + + +def test_state_enabled_when_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": True} + + set_module_args({ + "name": "my-node", + "state": "enabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.enable_node.call_args == call("my-node") + + assert result.value["enabled"] is True + assert result.value["changed"] is True + + +def test_state_enabled_when_offline_check_mode(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": True} + + set_module_args({ + "name": "my-node", + "state": "enabled", + "_ansible_check_mode": True, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.enable_node.called + + assert result.value["enabled"] is True + assert result.value["changed"] is True + + +def test_state_enabled_when_not_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": False} + + set_module_args({ + "name": "my-node", + "state": "enabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.enable_node.called + + assert result.value["enabled"] is False + assert result.value["changed"] is False + + +def 
test_state_disabled_when_not_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": False} + + set_module_args({ + "name": "my-node", + "state": "disabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.disable_node.call_args == call("my-node") + + assert result.value["disabled"] is True + assert result.value["changed"] is True + + +def test_state_disabled_when_not_offline_check_mode(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": False} + + set_module_args({ + "name": "my-node", + "state": "disabled", + "_ansible_check_mode": True, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.disable_node.called + + assert result.value["disabled"] is True + assert result.value["changed"] is True + + +def test_state_disabled_when_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = {"offline": True} + + set_module_args({ + "name": "my-node", + "state": "disabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.disable_node.called + + assert result.value["disabled"] is False + assert result.value["changed"] is False + + +def test_configure_num_executors_when_not_configured(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "present", + "num_executors": 3, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.reconfig_node.call_args[0][0] == "my-node" + assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + + 3 + +""") + + assert 
result.value["configured"] is True + assert result.value["changed"] is True + + +def test_configure_num_executors_when_not_equal(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = """ + + 3 + +""" + + set_module_args({ + "name": "my-node", + "state": "present", + "num_executors": 2, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + + 2 + +""") + + assert result.value["configured"] is True + assert result.value["changed"] is True + + +def test_configure_num_executors_when_equal(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = """ + + 2 + +""" + + set_module_args({ + "name": "my-node", + "state": "present", + "num_executors": 2, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.reconfig_node.called + + assert result.value["configured"] is False + assert result.value["changed"] is False + + +def test_configure_labels_when_not_configured(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "present", + "labels": [ + "a", + "b", + "c", + ], + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.reconfig_node.call_args[0][0] == "my-node" + assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + + + +""") + + assert result.value["configured"] is True + assert result.value["changed"] is True + + +def test_configure_labels_when_not_equal(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = """ + + + +""" + + set_module_args({ + "name": "my-node", + "state": "present", + "labels": [ + "a", + "z", + "c", + ], + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert 
instance.reconfig_node.call_args[0][0] == "my-node" + assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + + + +""") + + assert result.value["configured"] is True + assert result.value["changed"] is True + + +def test_configure_labels_when_equal(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = """ + + + +""" + + set_module_args({ + "name": "my-node", + "state": "present", + "labels": [ + "a", + "b", + "c", + ], + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.reconfig_node.called + + assert result.value["configured"] is False + assert result.value["changed"] is False + + +def test_configure_labels_fail_when_contains_space(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + + set_module_args({ + "name": "my-node", + "state": "present", + "labels": [ + "a error", + ], + }) + + with raises(AnsibleFailJson): + jenkins_node.main() + + assert not instance.reconfig_node.called diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index 218fe45673..cfc8493912 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -54,3 +54,6 @@ proxmoxer ; python_version > '3.6' #requirements for nomad_token modules python-nomad < 2.0.0 ; python_version <= '3.6' python-nomad >= 2.0.0 ; python_version >= '3.7' + +# requirement for jenkins_build, jenkins_node, jenkins_plugin modules +python-jenkins >= 0.4.12 \ No newline at end of file From b1f4604067b5af4341c9febe24697a658d3fcfdd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 19 Oct 2024 14:05:48 +0200 Subject: [PATCH 297/482] Skip timezone test on RHEL 7.9 VMs (#9035) Skip timezone test on RHEL 7.9 VMs. 
--- tests/integration/targets/timezone/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/timezone/aliases b/tests/integration/targets/timezone/aliases index 007bed5386..c7f0c89c23 100644 --- a/tests/integration/targets/timezone/aliases +++ b/tests/integration/targets/timezone/aliases @@ -7,3 +7,4 @@ destructive skip/aix skip/osx skip/macos +skip/rhel7.9 # TODO: '/bin/timedatectl set-local-rtc no' fails with 'Failed to set local RTC: Failed to set RTC to local/UTC: Input/output error' From 658637dc700f6e795074a22071fa7a40ef7f11fb Mon Sep 17 00:00:00 2001 From: Victor Gaudard Date: Sat, 19 Oct 2024 16:03:49 -0400 Subject: [PATCH 298/482] =?UTF-8?q?keycloak=5Fgroup:=20fix=20subgroup=20cr?= =?UTF-8?q?eation=20in=20Keycloak=20=E2=89=A523=20(#8979)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * keycloak_group: fix subgroup creation in Keycloak ≥23 * Add changelog fragment * Include issue and pull request in changelog fragment Co-authored-by: Felix Fontein * Use new way to get subgroups when getting a subgroup chain * Fix indent --------- Co-authored-by: Felix Fontein --- .../8979-keycloak_group-fix-subgroups.yml | 2 ++ .../identity/keycloak/keycloak.py | 21 +++++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/8979-keycloak_group-fix-subgroups.yml diff --git a/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml b/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml new file mode 100644 index 0000000000..c64a09add6 --- /dev/null +++ b/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_group - fix crash caused in subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979). 
diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 128b0fee13..15603331b0 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1499,6 +1499,23 @@ class KeycloakAPI(object): self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" % (gid, realm, str(e))) + def get_subgroups(self, parent, realm="master"): + if 'subGroupCount' in parent: + # Since version 23, when GETting a group Keycloak does not + # return subGroups but only a subGroupCount. + # Children must be fetched in a second request. + if parent['subGroupCount'] == 0: + group_children = [] + else: + group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + group_children = json.loads(to_native(open_url(group_children_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + subgroups = group_children + else: + subgroups = parent['subGroups'] + return subgroups + def get_group_by_name(self, name, realm="master", parents=None): """ Fetch a keycloak group within a realm based on its name. @@ -1519,7 +1536,7 @@ class KeycloakAPI(object): if not parent: return None - all_groups = parent['subGroups'] + all_groups = self.get_subgroups(parent, realm) else: all_groups = self.get_groups(realm=realm) @@ -1568,7 +1585,7 @@ class KeycloakAPI(object): return None for p in name_chain[1:]: - for sg in tmp['subGroups']: + for sg in self.get_subgroups(tmp): pv, is_id = self._get_normed_group_parent(p) if is_id: From cc72fa078694872a8884f2f52e4e425ae35f3745 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 19 Oct 2024 22:07:56 +0200 Subject: [PATCH 299/482] groupby_as_dict: mention Jinja2's groupby filter (#9040) Mention Jinja2's groupby filter. 
--- plugins/filter/groupby_as_dict.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py index 4a8f4c6dc1..8e29c5863c 100644 --- a/plugins/filter/groupby_as_dict.py +++ b/plugins/filter/groupby_as_dict.py @@ -13,6 +13,8 @@ DOCUMENTATION = ''' author: Felix Fontein (@felixfontein) description: - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. + - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries with the same value, + or when you need a dictionary with list values, or when you need to use deeply nested attributes. positional: attribute options: _input: From 93be499f26a59faa5ba171ab44f22beea2533e74 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Mon, 21 Oct 2024 21:51:57 +0300 Subject: [PATCH 300/482] one_vnet: Fix module (#9019) * Fix one_vnet module * Add CHANGELOG * Fix CHANGELOG --- changelogs/fragments/9019-onevnet-bugfix.yml | 2 ++ plugins/modules/one_vnet.py | 35 +++++++++++--------- 2 files changed, 21 insertions(+), 16 deletions(-) create mode 100644 changelogs/fragments/9019-onevnet-bugfix.yml diff --git a/changelogs/fragments/9019-onevnet-bugfix.yml b/changelogs/fragments/9019-onevnet-bugfix.yml new file mode 100644 index 0000000000..3da3ea0399 --- /dev/null +++ b/changelogs/fragments/9019-onevnet-bugfix.yml @@ -0,0 +1,2 @@ +bugfixes: + - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019). diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py index 93523f8b4f..2dcf20de5f 100644 --- a/plugins/modules/one_vnet.py +++ b/plugins/modules/one_vnet.py @@ -213,8 +213,8 @@ parent_network_id: type: int returned: when O(state=present) sample: 1 -vm_mad: - description: The network's VM_MAD. +vn_mad: + description: The network's VN_MAD. 
type: str returned: when O(state=present) sample: bridge @@ -319,7 +319,7 @@ class NetworksModule(OpenNebulaModule): # the other two parameters are used for pagination, -1 for both essentially means "return all" pool = self.one.vnpool.info(-2, -1, -1) - for template in pool.VMTEMPLATE: + for template in pool.VNET: if predicate(template): return template @@ -339,21 +339,23 @@ class NetworksModule(OpenNebulaModule): def get_networks_ar_pool(self, template): ar_pool = [] - for ar in template.AR_POOL: + template_pool = template.AR_POOL.AR + for ar in range(len(template_pool)): + template_param = template_pool[ar] ar_pool.append({ # These params will always be present - 'ar_id': ar['AR_ID'], - 'mac': ar['MAC'], - 'size': ar['SIZE'], - 'type': ar['TYPE'], + 'ar_id': template_param.AR_ID, + 'mac': template_param.MAC, + 'size': template_param.SIZE, + 'type': template_param.TYPE, # These are optional so firstly check for presence # and if not present set value to Null - 'allocated': getattr(ar, 'ALLOCATED', 'Null'), - 'ip': getattr(ar, 'IP', 'Null'), - 'global_prefix': getattr(ar, 'GLOBAL_PREFIX', 'Null'), - 'parent_network_ar_id': getattr(ar, 'PARENT_NETWORK_AR_ID', 'Null'), - 'ula_prefix': getattr(ar, 'ULA_PREFIX', 'Null'), - 'vn_mad': getattr(ar, 'VN_MAD', 'Null'), + 'allocated': getattr(template_param, 'ALLOCATED', 'Null'), + 'ip': getattr(template_param, 'IP', 'Null'), + 'global_prefix': getattr(template_param, 'GLOBAL_PREFIX', 'Null'), + 'parent_network_ar_id': getattr(template_param, 'PARENT_NETWORK_AR_ID', 'Null'), + 'ula_prefix': getattr(template_param, 'ULA_PREFIX', 'Null'), + 'vn_mad': getattr(template_param, 'VN_MAD', 'Null'), }) return ar_pool @@ -381,7 +383,7 @@ class NetworksModule(OpenNebulaModule): 'bridge': template.BRIDGE, 'bride_type': template.BRIDGE_TYPE, 'parent_network_id': template.PARENT_NETWORK_ID, - 'vm_mad': template.VM_MAD, + 'vn_mad': template.VN_MAD, 'phydev': template.PHYDEV, 'vlan_id': template.VLAN_ID, 'outer_vlan_id': 
template.OUTER_VLAN_ID, @@ -394,7 +396,8 @@ class NetworksModule(OpenNebulaModule): def create_template(self, name, template_data): if not self.module.check_mode: - self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data) + # -1 means that network won't be added to any cluster which happens by default + self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data, -1) result = self.get_template_info(self.get_template_by_name(name)) result['changed'] = True From 9fb686fe3533e185dd1791fe460e2c24bd8d9658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20B=C3=B6sel?= Date: Mon, 21 Oct 2024 20:52:28 +0200 Subject: [PATCH 301/482] add plugin for generic keycloak component (#8826) * add plugin for generic keycloak component * add changelog fragment * fix import in test * Update plugins/modules/keycloak_component.py Co-authored-by: Felix Fontein * Update plugins/modules/keycloak_component.py Co-authored-by: Felix Fontein * Update plugins/modules/keycloak_component.py Co-authored-by: Felix Fontein * Update plugins/modules/keycloak_component.py Co-authored-by: Felix Fontein * set correct supported diff_mode * fix line lenght * Update docblock Co-authored-by: Felix Fontein * Update changelogs/fragments/8826-keycloak-component.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update plugins/modules/keycloak_component.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update plugins/modules/keycloak_component.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * update docblocks * add entry to BOTMETA.yml * update copyright * Set Version number Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * remove changelog fragment --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .github/BOTMETA.yml | 2 + plugins/modules/keycloak_component.py | 323 +++++++++++++++++ 
.../modules/test_keycloak_component.py | 327 ++++++++++++++++++ 3 files changed, 652 insertions(+) create mode 100644 plugins/modules/keycloak_component.py create mode 100644 tests/unit/plugins/modules/test_keycloak_component.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 935fe11664..989752a0a7 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -800,6 +800,8 @@ files: maintainers: fynncfchen johncant $modules/keycloak_clientsecret_regenerate.py: maintainers: fynncfchen johncant + $modules/keycloak_component.py: + maintainers: fivetide $modules/keycloak_group.py: maintainers: adamgoossens $modules/keycloak_identity_provider.py: diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py new file mode 100644 index 0000000000..375953c3e8 --- /dev/null +++ b/plugins/modules/keycloak_component.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Björn Bösel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_component + +short_description: Allows administration of Keycloak components via Keycloak API + +version_added: 10.0.0 + +description: + - This module allows the administration of Keycloak components via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the realm being + used must have the requisite access rights. In a default Keycloak installation, C(admin-cli) + and an C(admin) user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. 
+ + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). + Aliases are provided so camelCased versions can be used as well. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the Keycloak component. + - On V(present), the component will be created (or updated if it exists already). + - On V(absent), the component will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the component to create. + type: str + required: true + parent_id: + description: + - The parent_id of the component. In practice the ID (name) of the realm. + type: str + required: true + provider_id: + description: + - The name of the "provider ID" for the key. + type: str + required: true + provider_type: + description: + - The name of the "provider type" for the key. That is, V(org.keycloak.storage.UserStorageProvider), + V(org.keycloak.userprofile.UserProfileProvider), ... + - See U(https://www.keycloak.org/docs/latest/server_development/index.html#_providers). + type: str + required: true + config: + description: + - Configuration properties for the provider. + - Contents vary depending on the provider type. 
+ type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Björn Bösel (@fivetide) +''' + +EXAMPLES = ''' +- name: Manage Keycloak User Storage Provider + community.general.keycloak_component: + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + name: my storage provider + state: present + parent_id: some_realm + provider_id: my storage + provider_type: "org.keycloak.storage.UserStorageProvider" + config: + myCustomKey: "my_custom_key" + cachePolicy: "NO_CACHE" + enabled: true +''' + +RETURN = ''' +end_state: + description: Representation of the keycloak_component after module execution. + returned: on success + type: dict + contains: + id: + description: ID of the component. + type: str + returned: when O(state=present) + sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4 + name: + description: Name of the component. + type: str + returned: when O(state=present) + sample: mykey + parentId: + description: ID of the realm this key belongs to. + type: str + returned: when O(state=present) + sample: myrealm + providerId: + description: The ID of the key provider. + type: str + returned: when O(state=present) + sample: rsa + providerType: + description: The type of provider. + type: str + returned: when O(state=present) + config: + description: component configuration. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from copy import deepcopy + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + parent_id=dict(type='str', required=True), + provider_id=dict(type='str', required=True), + provider_type=dict(type='str', required=True), + config=dict( + type='dict', + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the component if it is already + # present. This is only used for diff-mode. + before_component = {} + before_component['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "parent_id"] + + # Filter and map the parameters names that apply to the role + component_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + provider_type = module.params.get("provider_type") + + # Build a proposed changeset from parameters given to this module + changeset = {} + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. 
Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example private_key + # becomes privateKey. + # + # It also converts bool, str and int parameters into lists with a single + # entry of 'str' type. Bool values are also lowercased. This is required + # by Keycloak. + # + for component_param in component_params: + if component_param == 'config': + for config_param in module.params.get('config'): + changeset['config'][camel(config_param)] = [] + raw_value = module.params.get('config')[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = str(raw_value) + + changeset['config'][camel(config_param)].append(value) + else: + # No need for camelcase in here as these are one word parameters + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make a deep copy of the changeset. This is use when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # Make it easier to refer to current module parameters + name = module.params.get('name') + force = module.params.get('force') + state = module.params.get('state') + enabled = module.params.get('enabled') + provider_id = module.params.get('provider_id') + provider_type = module.params.get('provider_type') + parent_id = module.params.get('parent_id') + + # Get a list of all Keycloak components that are of keyprovider type. + current_components = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present get its key ID. Confusingly the key ID is + # also known as the Provider ID. 
+ component_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the key was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the key is + # already present. + for component in current_components: + if component['name'] == name: + component_id = component['id'] + changeset['id'] = component_id + changeset_copy['id'] = component_id + + # Compare top-level parameters + for param, value in changeset.items(): + before_component[param] = component[param] + + if changeset_copy[param] != component[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, component[param], changeset_copy[param]) + result['changed'] = True + # Compare parameters under the "config" key + for p, v in changeset_copy['config'].items(): + try: + before_component['config'][p] = component['config'][p] or [] + except KeyError: + before_component['config'][p] = [] + if changeset_copy['config'][p] != component['config'][p]: + changes += "config.%s: %s -> %s, " % (p, component['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the key). 
+ if component_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_component, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Component %s would be changed: %s" % (name, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Component %s changed: %s" % (name, changes.strip(", ")) + else: + result['msg'] = "Component %s was in sync" % (name) + + result['end_state'] = changeset_copy + elif component_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_component, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Component %s would be deleted" % (name) + else: + kc.delete_component(component_id, parent_id) + result['changed'] = True + result['msg'] = "Component %s deleted" % (name) + + result['end_state'] = {} + elif not component_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Component %s would be created" % (name) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Component %s created" % (name) + + result['end_state'] = changeset_copy + elif not component_id and state == 'absent': + result['changed'] = False + result['msg'] = "Component %s not present" % (name) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/test_keycloak_component.py b/tests/unit/plugins/modules/test_keycloak_component.py new file mode 100644 index 0000000000..e491bf431c --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_component.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager +from itertools import count + +from ansible.module_utils.six import StringIO +from ansible_collections.community.general.plugins.modules import keycloak_realm_key +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules import keycloak_component + + +@contextmanager +def patch_keycloak_api(get_components=None, create_component=None, update_component=None, delete_component=None): + """Mock context manager for patching the methods in KeycloakAPI + """ + + obj = keycloak_realm_key.KeycloakAPI + with patch.object(obj, 'get_components', side_effect=get_components) \ + as mock_get_components: + with patch.object(obj, 'create_component', side_effect=create_component) \ + as mock_create_component: + with patch.object(obj, 'update_component', side_effect=update_component) \ + as mock_update_component: + with patch.object(obj, 'delete_component', side_effect=delete_component) \ + as mock_delete_component: + yield mock_get_components, mock_create_component, mock_update_component, mock_delete_component + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def 
build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakComponent(ModuleTestCase): + def setUp(self): + super(TestKeycloakComponent, self).setUp() + self.module = keycloak_component + + def test_create_when_absent(self): + """Add a new realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'test-user-provider', + 'state': 'present', + 'provider_id': 'my-provider', + 'provider_type': 'org.keycloak.storage.UserStorageProvider', + 'config': { + 'enabled': True, + 'my_custom_config': 'foo', + }, + } + return_value_component_create = [ + { + "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9", + "name": "test-user-provider", + "providerId": "my-provider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "myCustomConfig": [ + "foo", + ], + "enabled": [ + "true" + ], + } + } + ] + # get before_comp, get default_mapper, get after_mapper + return_value_components_get = [ + [], [], [] + ] + changed = True + + set_module_args(module_args) + + # Run 
the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_create_component.mock_calls), 1) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # must not contain parent_id + mock_create_component.assert_called_once_with({ + 'name': 'test-user-provider', + 'providerId': 'my-provider', + 'providerType': 'org.keycloak.storage.UserStorageProvider', + 'config': { + 'enabled': ['true'], + 'myCustomConfig': ['foo'], + }, + }, 'realm-name') + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'test-user-provider', + 'state': 'present', + 'provider_id': 'my-provider', + 'provider_type': 'org.keycloak.storage.UserStorageProvider', + 'config': { + 'enabled': True, + 'my_custom_config': 'foo', + }, + } + return_value_components_get = [ + [ + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "test-user-provider", + "providerId": "rsa", + "providerType": "org.keycloak.storage.UserStorageProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "myCustomConfig": [ + "foo", + ], + "enabled": [ + "true" + ], + } + }, + ], + [], + [] + ] + return_value_component_update = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with 
mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 1) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'test-user-provider', + 'state': 'absent', + 'provider_id': 'my-provider', + 'provider_type': 'org.keycloak.storage.UserStorageProvider', + 'config': { + 'enabled': True, + 'my_custom_config': 'foo', + }, + } + return_value_components_get = [ + [] + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + 
"""Remove an existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'test-user-provider', + 'state': 'absent', + 'provider_id': 'my-provider', + 'provider_type': 'org.keycloak.storage.UserStorageProvider', + 'config': { + 'enabled': True, + 'my_custom_config': 'foo', + }, + } + + return_value_components_get = [ + [ + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "test-user-provider", + "providerId": "my-provider", + "providerType": "org.keycloak.storage.UserStorageProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "myCustomConfig": [ + "foo", + ], + "enabled": [ + "true" + ], + } + }, + ], + [], + [] + ] + return_value_component_delete = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 5b3b7a1fb19fc5a2d0df6f29a86159b0cdefc503 Mon Sep 17 00:00:00 2001 From: lewismiddleton <66401219+lewismiddleton@users.noreply.github.com> Date: Mon, 21 Oct 2024 19:53:18 +0100 Subject: [PATCH 302/482] github_app_access_token: add support for private key fact (#8989) * github_app_access_token: add 
support for private key fact Adds support for specifying the GitHub App private key via an ansible fact instead of a path to a file. This is useful when you want to generate registration tokens for a remote host but don't want to put secrets on the host. * Add license file * Fix pep8 formatting * Add changelog fragment * Run sanity tests on changelog * Apply suggestions from code review Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein * Add input validation check * Add import * Apply suggestions from code review Co-authored-by: Felix Fontein * Add error for mutually exclusive options * Update plugins/lookup/github_app_access_token.py Co-authored-by: Felix Fontein --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- .../8989-github-app-token-from-fact.yml | 2 ++ plugins/lookup/github_app_access_token.py | 24 +++++++++++---- .../files/app-private-key.pem | 27 +++++++++++++++++ .../files/app-private-key.pem.license | 3 ++ .../github_app_access_token/tasks/main.yml | 30 +++++++++++++++++++ .../github_app_access_token/vars/main.yml | 6 ++++ .../lookup/test_github_app_access_token.py | 20 ++++++++++++- 7 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/8989-github-app-token-from-fact.yml create mode 100644 tests/integration/targets/github_app_access_token/files/app-private-key.pem create mode 100644 tests/integration/targets/github_app_access_token/files/app-private-key.pem.license create mode 100644 tests/integration/targets/github_app_access_token/tasks/main.yml create mode 100644 tests/integration/targets/github_app_access_token/vars/main.yml diff --git a/changelogs/fragments/8989-github-app-token-from-fact.yml b/changelogs/fragments/8989-github-app-token-from-fact.yml new file mode 100644 index 0000000000..6b36d95a62 --- /dev/null +++ b/changelogs/fragments/8989-github-app-token-from-fact.yml @@ -0,0 +1,2 @@ 
+minor_changes: + - github_app_access_token lookup plugin - adds new ``private_key`` parameter (https://github.com/ansible-collections/community.general/pull/8989). diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index 5cd99b81c7..1d3c526c33 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -19,7 +19,7 @@ DOCUMENTATION = ''' key_path: description: - Path to your private key. - required: true + - Either O(key_path) or O(private_key) must be specified. type: path app_id: description: @@ -34,6 +34,12 @@ DOCUMENTATION = ''' - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. required: true type: str + private_key: + description: + - GitHub App private key in PEM file format as string. + - Either O(key_path) or O(private_key) must be specified. + type: str + version_added: 10.0.0 token_expiry: description: - How long the token should last for in seconds. 
@@ -71,7 +77,7 @@ import time import json from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display @@ -84,8 +90,10 @@ else: display = Display() -def read_key(path): +def read_key(path, private_key=None): try: + if private_key: + return jwk_from_pem(private_key.encode('utf-8')) with open(path, 'rb') as pem_file: return jwk_from_pem(pem_file.read()) except Exception as e: @@ -132,8 +140,8 @@ def post_request(generated_jwt, installation_id): return json_data.get('token') -def get_token(key_path, app_id, installation_id, expiry=600): - jwk = read_key(key_path) +def get_token(key_path, app_id, installation_id, private_key, expiry=600): + jwk = read_key(key_path, private_key) generated_jwt = encode_jwt(app_id, jwk, exp=expiry) return post_request(generated_jwt, installation_id) @@ -146,10 +154,16 @@ class LookupModule(LookupBase): self.set_options(var_options=variables, direct=kwargs) + if not (self.get_option("key_path") or self.get_option("private_key")): + raise AnsibleOptionsError("One of key_path or private_key is required") + if self.get_option("key_path") and self.get_option("private_key"): + raise AnsibleOptionsError("key_path and private_key are mutually exclusive") + t = get_token( self.get_option('key_path'), self.get_option('app_id'), self.get_option('installation_id'), + self.get_option('private_key'), self.get_option('token_expiry'), ) diff --git a/tests/integration/targets/github_app_access_token/files/app-private-key.pem b/tests/integration/targets/github_app_access_token/files/app-private-key.pem new file mode 100644 index 0000000000..06c6786ee6 --- /dev/null +++ b/tests/integration/targets/github_app_access_token/files/app-private-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEogIBAAKCAQEAr/EjKUujUdliSX79ZlDwq/+RCnOF1JCrekWGOOK4YGqgfJBM +Z/CLHYTW+BQAH172NTEwLlegwJpXtalae9WVhyMs4sFm7nxSZsFjRK7Gof1tuFbD +i4+GGlu4kci7xVcxzoZoVvswX4Xw/9MCg/Je35H8xbugwsWYg+ou79e0wx0fYU4d +dwiUte8K+/d1l5acMQuqcnUfJLRmXvw3w7hyemB51EPTGqkpcA4KmYns1W12ianD +Zo9/d2kLC2mcyxDkHmqWCv9vfUVrKB7yIC8DU5uY/acFtBaVE1yyvI+1lCCkxNWX +5IDbpP/xRJk68B0WXKaU2IdFUVYSD48u3nSZoQIDAQABAoIBAGLL9KOevqIagK+m +qKKItuzOgOKuhisb5b6uRbWx0jkKBv6LhOwkzemQi6oYiQ0UpQqviU+sky80PCZd +Z9r7z5Bn9y+JzMQEeb0LwTNzNUUHa1JFHl9DA9nPQXBTmOUyllxTa0nUmZA6RV9S +XSo8snu2nYtnVdmpXYBNw3eY1/9rb1blXEZHLJbCTaTX3MuWDYuJ4G0K6EArjSwG +DDGhOWIWfkk3zZAHqdsrJxgqXIx2Cv9m40hmC0XMwqh8/H3j4kZZhdglJhNbvnBM +8ZKRzpMOP1hbGATmi9A1HU+o6BpdIl1dyMRiod3WjSS7CKvs8BVR0XMK/SXDV9Wl +Jy6kwYUCgYEA4HwzV/YT+cTb61VL5ICj871m5VMaGJD96dOrnX33QYRNw5aLRc35 +HMaJ1t5Bp0d2J5h0mPoQkSvQxuPYfaytTYknSUE/bObYNMEnII3XeTmA8ILlG7kV +8OQah66GMKjyHocie2PxUuu9BWtuPvZJDrOuR9Pmw5aH6+oiXBSM0YcCgYEAyKRW +FHtDGC8ZHgBaytaGvlbVo3RTKboQgYqzf9HdvzWHlSbeZVuCk6MWtSNU+5+26RBK +FCI8FTBxqY/vai9zRgp/1u3jY3N1WIsowBgBV7C84IP6gEr/FAJlx++Eqzfmx1W7 +lU3/0IJ/jYS7D6C4aADifo4aGF0mFHFBk7sfpZcCgYBaIyTOnf15XgVcIjy9/LVY +amXFkS+6S4XY/Og87dZ5VTGQZoN3vPPZDRNN1qKQE46q6Xlv74D1eZ10Lwq/s7VG +m9rNfEiGZs7Lp/8ZADtT7rYKXNS35AKeXkkU0AwLv9qwTVyYJRJCVGvqoC99UpEV +OSqyprBTOr9LCBFR3eKJQwKBgHXRqoqUZy3IWmN3qdj6aF1U+Fbnc/5IuHCZVhZ0 +0lX5xQgcrvOt7NttJWRwvvKTMwFhA18XS1jV/aioUNp1yqcSe0dmoeRAZGP+M4u5 +jPBFZGQim/LCF09UqRfi2nEAfpAHFAP0rYdvWh9sFbxzkFXiTx4pq8Eq0bWnW+64 +Lzk5AoGAVeV9KgqJZLbl2Vbii3bJOuvtNeHOkIYPIoU6kgox9qp1derccWuMtwLT +PjhnWuCAX5dN1d7Rve4EovkjvuomDuZy6NCQlmLA6ff2pxtcJAkGy8Blc5VQeWs9 +i9DUFz2Sx6olEO7MDykj4B6O2YNlAwb8xq1oivE24laZPufprwI= +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/targets/github_app_access_token/files/app-private-key.pem.license b/tests/integration/targets/github_app_access_token/files/app-private-key.pem.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ 
b/tests/integration/targets/github_app_access_token/files/app-private-key.pem.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/github_app_access_token/tasks/main.yml b/tests/integration/targets/github_app_access_token/tasks/main.yml new file mode 100644 index 0000000000..9b7ba5d2c1 --- /dev/null +++ b/tests/integration/targets/github_app_access_token/tasks/main.yml @@ -0,0 +1,30 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Test code for the github_app_access_token plugin. +# +# Copyright (c) 2017-2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install JWT + ansible.builtin.pip: + name: + - jwt + +- name: Read file + ansible.builtin.set_fact: + github_app_private_key: "{{ lookup('ansible.builtin.file', 'app-private-key.pem') }}" + +- name: Generate Github App Token + register: github_app_access_token + ignore_errors: true + ansible.builtin.set_fact: + github_app_token: "{{ lookup('community.general.github_app_access_token', app_id=github_app_id, installation_id=github_app_installation_id, private_key=github_app_private_key) }}" + +- assert: + that: + - github_app_access_token is failed + - '"Github return error" in github_app_access_token.msg' diff --git a/tests/integration/targets/github_app_access_token/vars/main.yml b/tests/integration/targets/github_app_access_token/vars/main.yml new file mode 100644 index 0000000000..35bd49cd3e --- /dev/null +++ 
b/tests/integration/targets/github_app_access_token/vars/main.yml @@ -0,0 +1,6 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +github_app_id: 123456 +github_app_installation_id: 123456 diff --git a/tests/unit/plugins/lookup/test_github_app_access_token.py b/tests/unit/plugins/lookup/test_github_app_access_token.py index 4bf9c7e704..7971335a47 100644 --- a/tests/unit/plugins/lookup/test_github_app_access_token.py +++ b/tests/unit/plugins/lookup/test_github_app_access_token.py @@ -32,7 +32,7 @@ class MockResponse(MagicMock): class TestLookupModule(unittest.TestCase): - def test_get_token(self): + def test_get_token_with_file(self): with patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", open=mock_open(read_data="foo_bar"), open_url=MagicMock(return_value=MockResponse()), @@ -50,3 +50,21 @@ class TestLookupModule(unittest.TestCase): token_expiry=600 ) ) + + def test_get_token_with_fact(self): + with patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", + open_url=MagicMock(return_value=MockResponse()), + jwk_from_pem=MagicMock(return_value='private_key'), + jwt_instance=MockJWT(), + HAS_JWT=True): + lookup = lookup_loader.get('community.general.github_app_access_token') + self.assertListEqual( + [MockResponse.response_token], + lookup.run( + [], + app_id="app_id", + installation_id="installation_id", + private_key="foo_bar", + token_expiry=600 + ) + ) From a8c41ac4c19cc966ccd2a4bcdf94e8b7b557dc26 Mon Sep 17 00:00:00 2001 From: Boris Budini Date: Mon, 21 Oct 2024 20:53:56 +0200 Subject: [PATCH 303/482] Update keycloak_realm, add organizations_enabled (#8927) * Update keycloak_realm.py, add organizations_enabled * Update plugins/modules/keycloak_realm.py Co-authored-by: Felix Fontein * Create 
9027-support-organizations-in-keycloak-realm.yml Add changelog file * Bump version_added * Update version_added * Update changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../9027-support-organizations-in-keycloak-realm.yml | 2 ++ plugins/modules/keycloak_realm.py | 8 ++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml diff --git a/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml b/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml new file mode 100644 index 0000000000..7866cc53b8 --- /dev/null +++ b/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add boolean toggle to configure organization support for a given keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, https://github.com/ansible-collections/community.general/pull/8927/). diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 9bbcdb6b1a..747acf3081 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -384,6 +384,13 @@ options: aliases: - passwordPolicy type: str + organizations_enabled: + description: + - Enables support for experimental organization feature. + aliases: + - organizationsEnabled + type: bool + version_added: 10.0.0 permanent_lockout: description: - The realm permanent lockout. 
@@ -686,6 +693,7 @@ def main(): otp_policy_type=dict(type='str', aliases=['otpPolicyType']), otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), + organizations_enabled=dict(type='bool', aliases=['organizationsEnabled']), permanent_lockout=dict(type='bool', aliases=['permanentLockout']), quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), From ccf7f62325d17d794a184ad2ab18e0ca3655b87b Mon Sep 17 00:00:00 2001 From: Navaneeth S Date: Wed, 23 Oct 2024 04:35:46 +1030 Subject: [PATCH 304/482] Update documentation in apk.py (#9045) * Update apk.py Fix for issue #9017 * Update plugins/modules/apk.py Added a line break to fix the issue of the line being too long. Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/apk.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py index a6b058b932..7caefd1357 100644 --- a/plugins/modules/apk.py +++ b/plugins/modules/apk.py @@ -74,6 +74,7 @@ options: world: description: - Use a custom world file when checking for explicitly installed packages. + The file is used only when a value is provided for O(name), and O(state) is set to V(present) or V(latest). 
type: str default: /etc/apk/world version_added: 5.4.0 From 39f3b151e84b1ec772eeaa550c28484c8a2f2331 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Oct 2024 21:36:12 +1300 Subject: [PATCH 305/482] ansible_galaxy_install: add return value version (#9060) * add return value version * add changelog frag --- .../fragments/9060-ansible-galaxy-install-version.yml | 2 ++ plugins/modules/ansible_galaxy_install.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9060-ansible-galaxy-install-version.yml diff --git a/changelogs/fragments/9060-ansible-galaxy-install-version.yml b/changelogs/fragments/9060-ansible-galaxy-install-version.yml new file mode 100644 index 0000000000..87d5137ad2 --- /dev/null +++ b/changelogs/fragments/9060-ansible-galaxy-install-version.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060). diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index 62de70bb63..ea35b3afc0 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -188,6 +188,12 @@ new_roles: sample: ansistrano.deploy: 3.8.0 baztian.xfce: v0.0.3 +version: + description: Version of ansible-core for ansible-galaxy. 
+ type: str + returned: always + sample: 2.17.4 + version_added: 10.0.0 """ import re @@ -252,7 +258,6 @@ class AnsibleGalaxyInstall(ModuleHelper): if not match: self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line)) version = match.group("version") - version = tuple(int(x) for x in version.split('.')[:3]) return version try: @@ -265,7 +270,8 @@ class AnsibleGalaxyInstall(ModuleHelper): return runner, ctx.run() def __init_module__(self): - self.runner, self.ansible_version = self._get_ansible_galaxy_version() + self.runner, self.vars.version = self._get_ansible_galaxy_version() + self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3]) if self.ansible_version < (2, 11): self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.") self.vars.set("new_collections", {}, change=True) From 107df41d9c8e9da9de3e8c61f93d2614891e5ef3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Oct 2024 21:36:38 +1300 Subject: [PATCH 306/482] django_command/django_check/django_createcachetable: add return value version (#9063) * add return value version * add changelog frag * reformat yaml --- changelogs/fragments/9063-django-version.yml | 5 +++ plugins/module_utils/django.py | 18 +++++--- plugins/modules/django_check.py | 41 +++++++++++-------- plugins/modules/django_command.py | 21 +++++++--- plugins/modules/django_createcachetable.py | 19 ++++++--- .../plugins/modules/test_django_check.yaml | 16 +++++++- .../plugins/modules/test_django_command.yaml | 12 +++++- .../modules/test_django_createcachetable.yaml | 7 +++- 8 files changed, 104 insertions(+), 35 deletions(-) create mode 100644 changelogs/fragments/9063-django-version.yml diff --git a/changelogs/fragments/9063-django-version.yml b/changelogs/fragments/9063-django-version.yml new file mode 100644 index 0000000000..3d0287a756 --- /dev/null +++ b/changelogs/fragments/9063-django-version.yml @@ -0,0 
+1,5 @@ +minor_changes: + - django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063). + - django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). + - django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). + - django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index 5fb375c6fd..8314ed945e 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -32,6 +32,7 @@ _django_std_arg_fmts = dict( verbosity=cmd_runner_fmt.as_opt_val("--verbosity"), no_color=cmd_runner_fmt.as_fixed("--no-color"), skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), + version=cmd_runner_fmt.as_fixed("--version"), ) _django_database_args = dict( @@ -60,6 +61,9 @@ class _DjangoRunner(PythonRunner): ) return super(_DjangoRunner, self).__call__(args_order, output_process, ignore_value_none, check_mode_skip, check_mode_return, **kwargs) + def bare_context(self, *args, **kwargs): + return super(_DjangoRunner, self).__call__(*args, **kwargs) + class DjangoModuleHelper(ModuleHelper): module = {} @@ -98,16 +102,20 @@ class DjangoModuleHelper(ModuleHelper): arg_formats=self.arg_formats, venv=self.vars.venv, check_rc=True) + + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + + rc, out, err = runner.bare_context("version").run() + self.vars.version = out.strip() + with runner() as ctx: - run_params = self.vars.as_dict() - if self._check_mode_arg: - run_params.update({self._check_mode_arg: self.check_mode}) results = ctx.run(**run_params) self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - if self.verbosity >= 3: - self.vars.run_info = 
ctx.run_info + self.vars.set("run_info", ctx.run_info, verbosity=3) return results diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py index 1553da7a30..7a12ec94e2 100644 --- a/plugins/modules/django_check.py +++ b/plugins/modules/django_check.py @@ -8,48 +8,49 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ +--- module: django_check author: - - Alexei Znamensky (@russoz) +- Alexei Znamensky (@russoz) short_description: Wrapper for C(django-admin check) version_added: 9.1.0 description: - - This module is a wrapper for the execution of C(django-admin check). +- This module is a wrapper for the execution of C(django-admin check). extends_documentation_fragment: - - community.general.attributes - - community.general.django +- community.general.attributes +- community.general.django options: database: description: - - Specify databases to run checks against. - - If not specified, Django will not run database tests. + - Specify databases to run checks against. + - If not specified, Django will not run database tests. type: list elements: str deploy: description: - - Include additional checks relevant in a deployment setting. + - Include additional checks relevant in a deployment setting. type: bool default: false fail_level: description: - - Message level that will trigger failure. - - Default is the Django default value. Check the documentation for the version being used. + - Message level that will trigger failure. + - Default is the Django default value. Check the documentation for the version being used. type: str choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG] tags: description: - - Restrict checks to specific tags. + - Restrict checks to specific tags. type: list elements: str apps: description: - - Restrict checks to specific applications. - - Default is to check all applications. + - Restrict checks to specific applications. + - Default is to check all applications. 
type: list elements: str notes: - - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc). - - The module will fail if RV(ignore:rc) is not zero. +- The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc). +- The module will fail if RV(ignore:rc) is not zero. attributes: check_mode: support: full @@ -58,6 +59,7 @@ attributes: """ EXAMPLES = """ +--- - name: Check the entire project community.general.django_check: settings: myproject.settings @@ -65,18 +67,25 @@ EXAMPLES = """ - name: Create the project using specific databases community.general.django_check: database: - - somedb - - myotherdb + - somedb + - myotherdb settings: fancysite.settings pythonpath: /home/joedoe/project/fancysite venv: /home/joedoe/project/fancysite/venv """ RETURN = """ +--- run_info: description: Command-line execution information. type: dict returned: success and C(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 """ from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py index dcb8d26313..2d6d36ad74 100644 --- a/plugins/modules/django_command.py +++ b/plugins/modules/django_command.py @@ -8,16 +8,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ +--- module: django_command author: - - Alexei Znamensky (@russoz) +- Alexei Znamensky (@russoz) short_description: Run Django admin commands version_added: 9.0.0 description: - - This module allows the execution of arbitrary Django admin commands. +- This module allows the execution of arbitrary Django admin commands. 
extends_documentation_fragment: - - community.general.attributes - - community.general.django +- community.general.attributes +- community.general.django attributes: check_mode: support: none @@ -26,17 +27,18 @@ attributes: options: command: description: - - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. + - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. type: str required: true extra_args: type: list elements: str description: - - List of extra arguments passed to the django admin command. + - List of extra arguments passed to the django admin command. """ EXAMPLES = """ +--- - name: Check the project community.general.django_command: command: check @@ -51,10 +53,17 @@ EXAMPLES = """ """ RETURN = """ +--- run_info: description: Command-line execution information. type: dict returned: success and O(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 """ import shlex diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py index b038e0358f..85f5774294 100644 --- a/plugins/modules/django_createcachetable.py +++ b/plugins/modules/django_createcachetable.py @@ -8,17 +8,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ +--- module: django_createcachetable author: - - Alexei Znamensky (@russoz) +- Alexei Znamensky (@russoz) short_description: Wrapper for C(django-admin createcachetable) version_added: 9.1.0 description: - - This module is a wrapper for the execution of C(django-admin createcachetable). +- This module is a wrapper for the execution of C(django-admin createcachetable). 
extends_documentation_fragment: - - community.general.attributes - - community.general.django - - community.general.django.database +- community.general.attributes +- community.general.django +- community.general.django.database attributes: check_mode: support: full @@ -27,6 +28,7 @@ attributes: """ EXAMPLES = """ +--- - name: Create cache table in the default database community.general.django_createcachetable: settings: myproject.settings @@ -40,10 +42,17 @@ EXAMPLES = """ """ RETURN = """ +--- run_info: description: Command-line execution information. type: dict returned: success and O(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 """ from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper diff --git a/tests/unit/plugins/modules/test_django_check.yaml b/tests/unit/plugins/modules/test_django_check.yaml index 91a8ff1953..74374c01c9 100644 --- a/tests/unit/plugins/modules/test_django_check.yaml +++ b/tests/unit/plugins/modules/test_django_check.yaml @@ -7,11 +7,18 @@ - id: success input: settings: whatever.settings + output: + version: "5.1.2" mocks: run_command: - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] + - command: [/testbin/python, -m, django, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 + out: "5.1.2\n" + err: "" + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] + environ: *env-def + rc: 0 out: "whatever\n" err: "" - id: multiple_databases @@ -20,8 +27,15 @@ database: - abc - def + output: + version: "5.1.2" mocks: run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: "" - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] environ: *env-def rc: 0 diff --git 
a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml index 2a19351083..960dc1a24f 100644 --- a/tests/unit/plugins/modules/test_django_command.yaml +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -15,9 +15,14 @@ settings: whatever.settings mocks: run_command: - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] + - command: [/testbin/python, -m, django, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 + out: "5.1.2\n" + err: "" + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] + environ: *env-def + rc: 0 out: "whatever\n" err: "" - id: command_fail @@ -33,6 +38,11 @@ failed: true mocks: run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: "" - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: *env-def rc: 1 diff --git a/tests/unit/plugins/modules/test_django_createcachetable.yaml b/tests/unit/plugins/modules/test_django_createcachetable.yaml index 22b7dcb304..a58146144a 100644 --- a/tests/unit/plugins/modules/test_django_createcachetable.yaml +++ b/tests/unit/plugins/modules/test_django_createcachetable.yaml @@ -9,8 +9,13 @@ settings: whatever.settings mocks: run_command: + - command: [/testbin/python, -m, django, --version] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "5.1.2\n" + err: "" - command: [/testbin/python, -m, django, createcachetable, --no-color, --settings=whatever.settings, --noinput, --database=default] - environ: {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + environ: *env-def rc: 0 out: "whatever\n" err: "" From ce1b9887b182df32f8dd5e78d70521b6574fdb55 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Sun, 27 Oct 2024 21:36:52 +1300 Subject: [PATCH 307/482] gconftool2/gconftool2_info: add return value version (#9064) * add return value version * add changelog frag --- .../fragments/9064-gconftool2-version.yml | 4 +++ plugins/module_utils/gconftool2.py | 1 + plugins/modules/gconftool2.py | 10 +++++++ plugins/modules/gconftool2_info.py | 9 ++++++ .../unit/plugins/modules/test_gconftool2.yaml | 30 ++++++++++++++++++- .../plugins/modules/test_gconftool2_info.yaml | 12 +++++++- 6 files changed, 64 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9064-gconftool2-version.yml diff --git a/changelogs/fragments/9064-gconftool2-version.yml b/changelogs/fragments/9064-gconftool2-version.yml new file mode 100644 index 0000000000..7913c76a81 --- /dev/null +++ b/changelogs/fragments/9064-gconftool2-version.yml @@ -0,0 +1,4 @@ +minor_changes: + - gcontool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064). + - gcontool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). + - gcontool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). 
diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py index e90c3fb2cb..8e04f9ee3f 100644 --- a/plugins/module_utils/gconftool2.py +++ b/plugins/module_utils/gconftool2.py @@ -27,6 +27,7 @@ def gconftool2_runner(module, **kwargs): value=cmd_runner_fmt.as_list(), direct=cmd_runner_fmt.as_bool("--direct"), config_source=cmd_runner_fmt.as_opt_val("--config-source"), + version=cmd_runner_fmt.as_fixed("--version"), ), **kwargs ) diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index 7cf9a92c44..1759665294 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -96,6 +96,12 @@ previous_value: returned: success type: str sample: "Serif 12" +version: + description: Version of gconftool-2. + type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper @@ -129,6 +135,10 @@ class GConftool(StateModuleHelper): if not self.vars.direct and self.vars.config_source is not None: self.do_raise('If the "config_source" is specified then "direct" must be "true"') + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + self.vars.set('previous_value', self._get(), fact=True) self.vars.set('value_type', self.vars.value_type) self.vars.set('_value', self.vars.previous_value, output=False, change=True) diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index ebe2121ad1..6fb274e038 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -50,6 +50,12 @@ value: returned: success type: str sample: Monospace 10 +version: + description: Version of gconftool-2. 
+ type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -68,6 +74,9 @@ class GConftoolInfo(ModuleHelper): def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() def __run__(self): with self.runner.context(args_order=["state", "key"]) as ctx: diff --git a/tests/unit/plugins/modules/test_gconftool2.yaml b/tests/unit/plugins/modules/test_gconftool2.yaml index 084741e6d1..badbdf2614 100644 --- a/tests/unit/plugins/modules/test_gconftool2.yaml +++ b/tests/unit/plugins/modules/test_gconftool2.yaml @@ -13,11 +13,17 @@ output: new_value: '200' changed: true + version: "3.2.6" mocks: run_command: - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + - command: [/testbin/gconftool-2, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 + out: "3.2.6\n" + err: "" + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 out: "100\n" err: "" - command: [/testbin/gconftool-2, --type, int, --set, /desktop/gnome/background/picture_filename, "200"] @@ -39,8 +45,14 @@ output: new_value: '200' changed: false + version: "3.2.5" mocks: run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.5\n" + err: "" - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -65,8 +77,14 @@ output: new_value: 'false' changed: false + version: "3.2.4" mocks: run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: "" - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 @@ -91,6 +109,11 @@ changed: 
true mocks: run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: "" - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -110,6 +133,11 @@ changed: false mocks: run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: "" - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gconftool2_info.yaml b/tests/unit/plugins/modules/test_gconftool2_info.yaml index 26db16a368..fac04430a0 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.yaml +++ b/tests/unit/plugins/modules/test_gconftool2_info.yaml @@ -11,9 +11,14 @@ value: '100' mocks: run_command: - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + - command: [/testbin/gconftool-2, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 + out: "3.2.6\n" + err: "" + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 out: "100\n" err: "" - id: test_simple_element_get_not_found @@ -23,6 +28,11 @@ value: mocks: run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.6\n" + err: "" - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 From d0b4e91cac95b2b3c0984704799b1f1f2d4ce8b8 Mon Sep 17 00:00:00 2001 From: salmon111 Date: Tue, 29 Oct 2024 05:09:02 +0900 Subject: [PATCH 308/482] modprobe: fix --check mode not being honored for persistent option (#9052) * modprobe: fix --check mode not being honored for persistent option * Add CHANGELOG * Update CHANGELOG * Update changelogs/fragments/9052-modprobe-bugfix.yml Co-authored-by: Felix Fontein --------- Co-authored-by: d-usuba Co-authored-by: Felix Fontein 
--- changelogs/fragments/9052-modprobe-bugfix.yml | 2 ++ plugins/modules/modprobe.py | 14 ++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/9052-modprobe-bugfix.yml diff --git a/changelogs/fragments/9052-modprobe-bugfix.yml b/changelogs/fragments/9052-modprobe-bugfix.yml new file mode 100644 index 0000000000..b9519e9055 --- /dev/null +++ b/changelogs/fragments/9052-modprobe-bugfix.yml @@ -0,0 +1,2 @@ +bugfixes: + - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052). diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index f271b3946f..57e682245f 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -163,8 +163,9 @@ class Modprobe(object): def create_module_file(self): file_path = os.path.join(MODULES_LOAD_LOCATION, self.name + '.conf') - with open(file_path, 'w') as file: - file.write(self.name + '\n') + if not self.check_mode: + with open(file_path, 'w') as file: + file.write(self.name + '\n') @property def module_options_file_content(self): @@ -175,8 +176,9 @@ class Modprobe(object): def create_module_options_file(self): new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, self.name + '.conf') - with open(new_file_path, 'w') as file: - file.write(self.module_options_file_content) + if not self.check_mode: + with open(new_file_path, 'w') as file: + file.write(self.module_options_file_content) def disable_old_params(self): @@ -190,7 +192,7 @@ class Modprobe(object): file_content[index] = '#' + line content_changed = True - if content_changed: + if not self.check_mode and content_changed: with open(modprobe_file, 'w') as file: file.write('\n'.join(file_content)) @@ -206,7 +208,7 @@ class Modprobe(object): file_content[index] = '#' + line content_changed = True - if content_changed: + if not self.check_mode and 
content_changed: with open(module_file, 'w') as file: file.write('\n'.join(file_content)) From 3506f73da1452dc6c84afe15e7a366398eb970a2 Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Mon, 28 Oct 2024 14:10:48 -0600 Subject: [PATCH 309/482] Add UpdateUserAccountTypes command to redfish_command (#9059) * Add UpdateUserAccountTypes command to redfish_command https://github.com/ansible-collections/community.general/issues/9058 * Add changelog fragment * Update changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml Update changelog fragment Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...redfish_command-updateuseraccounttypes.yml | 2 ++ plugins/module_utils/redfish_utils.py | 21 +++++++++++++++++++ plugins/modules/redfish_command.py | 15 ++++++++++++- 3 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml diff --git a/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml b/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml new file mode 100644 index 0000000000..066a84e1e9 --- /dev/null +++ b/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058, https://github.com/ansible-collections/community.general/pull/9059). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 2ef928e510..2dab9d80f2 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1557,6 +1557,27 @@ class RedfishUtils(object): resp['msg'] = 'Modified account service' return resp + def update_user_accounttypes(self, user): + account_types = user.get('account_accounttypes') + oemaccount_types = user.get('account_oemaccounttypes') + if account_types is None and oemaccount_types is None: + return {'ret': False, 'msg': + 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + + uri = response['uri'] + payload = {} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) + def check_password_change_required(self, return_data): """ Checks a response if a user needs to change their password diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index df541a1bd3..103f9e1d50 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -549,6 +549,18 @@ EXAMPLES = ''' AccountLockoutThreshold: 5 AccountLockoutDuration: 600 + - name: Update user AccountTypes + community.general.redfish_command: + category: Accounts + command: UpdateUserAccountTypes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_types: + - Redfish + - WebUI + - name: Clear Manager Logs with a timeout of 20 seconds community.general.redfish_command: category: Manager @@ -810,7 +822,7 @@ 
CATEGORY_COMMANDS_ALL = { "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", - "UpdateAccountServiceProperties"], + "UpdateUserAccountTypes", "UpdateAccountServiceProperties"], "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", "ResetToDefaults", @@ -978,6 +990,7 @@ def main(): "UpdateUserRole": rf_utils.update_user_role, "UpdateUserPassword": rf_utils.update_user_password, "UpdateUserName": rf_utils.update_user_name, + "UpdateUserAccountTypes": rf_utils.update_user_accounttypes, "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties } From 67d1b6c4139981239acafe499538e09a186e40d6 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Mon, 28 Oct 2024 23:11:39 +0300 Subject: [PATCH 310/482] one_image/one_image_info: Fix class typo (#9056) * Fix one_image class method args * Add CHANGELOG fragment * PR fix * PR fixes --- changelogs/fragments/9056-fix-one_image-modules.yml | 3 +++ plugins/modules/one_image.py | 12 ++++++------ plugins/modules/one_image_info.py | 2 +- 3 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/9056-fix-one_image-modules.yml diff --git a/changelogs/fragments/9056-fix-one_image-modules.yml b/changelogs/fragments/9056-fix-one_image-modules.yml new file mode 100644 index 0000000000..31b85904fa --- /dev/null +++ b/changelogs/fragments/9056-fix-one_image-modules.yml @@ -0,0 +1,3 @@ +bugfixes: + - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). + - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). 
diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py index 86db3b0405..5877142cdf 100644 --- a/plugins/modules/one_image.py +++ b/plugins/modules/one_image.py @@ -462,7 +462,7 @@ class ImageModule(OpenNebulaModule): if changed and not self.module.check_mode: self.one.image.enable(image.ID, enable) - result = OpenNebulaModule.get_image_info(image) + result = self.get_image_info(image) result['changed'] = changed return result @@ -486,7 +486,7 @@ class ImageModule(OpenNebulaModule): if changed and not self.module.check_mode: self.one.image.persistent(image.ID, enable) - result = OpenNebulaModule.get_image_info(image) + result = self.get_image_info(image) result['changed'] = changed return result @@ -497,7 +497,7 @@ class ImageModule(OpenNebulaModule): tmp_image = self.get_image_by_name(new_name) if tmp_image: - result = OpenNebulaModule.get_image_info(tmp_image) + result = self.get_image_info(image) result['changed'] = False return result @@ -509,7 +509,7 @@ class ImageModule(OpenNebulaModule): self.wait_for_ready(new_id) image = self.one.image.info(new_id) - result = OpenNebulaModule.get_image_info(image) + result = self.get_image_info(image) result['changed'] = True return result @@ -519,7 +519,7 @@ class ImageModule(OpenNebulaModule): self.module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") if new_name == image.NAME: - result = OpenNebulaModule.get_image_info(image) + result = self.get_image_info(image) result['changed'] = False return result @@ -530,7 +530,7 @@ class ImageModule(OpenNebulaModule): if not self.module.check_mode: self.one.image.rename(image.ID, new_name) - result = OpenNebulaModule.get_image_info(image) + result = self.get_image_info(image) result['changed'] = True return result diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py index 2ad0f3c493..4bc48dfda1 100644 --- a/plugins/modules/one_image_info.py +++ b/plugins/modules/one_image_info.py @@ -307,7 +307,7 
@@ class ImageInfoModule(OpenNebulaModule): images = self.get_all_images().IMAGE self.result = { - 'images': [OpenNebulaModule.get_image_info(image) for image in images] + 'images': [self.get_image_info(image) for image in images] } self.exit() From 9069f673e27fc1ca7998e57dd6ae056e614808ca Mon Sep 17 00:00:00 2001 From: Robzz Date: Mon, 28 Oct 2024 21:13:06 +0100 Subject: [PATCH 311/482] proxmox_kvm: add support for ciupgrade parameter (#9066) * proxmox_kvm: add support for ciupgrade parameter * add changelog fragment * proxmox_kvm: version_added specifier for ciupgrade parameter * proxmox_kvm: remove default value from docs for ciupgrade parameter --- .../fragments/9066-proxmox-kvm-ciupgrade.yml | 2 ++ plugins/modules/proxmox_kvm.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml diff --git a/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml b/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml new file mode 100644 index 0000000000..91e9127b70 --- /dev/null +++ b/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066). diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index cac3496228..0c9904873d 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -86,6 +86,11 @@ options: type: str choices: ['nocloud', 'configdrive2'] version_added: 1.3.0 + ciupgrade: + description: + - 'cloud-init: do an automatic package upgrade after the first boot.' + type: bool + version_added: 10.0.0 ciuser: description: - 'cloud-init: username of default user to create.' 
@@ -984,6 +989,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): # Available only in PVE 4 only_v4 = ['force', 'protection', 'skiplock'] only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags'] + only_v8 = ['ciupgrade'] # valid clone parameters valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target'] @@ -1012,6 +1018,12 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): if p in kwargs: del kwargs[p] + # The features work only on PVE 8 + if pve_major_version < 8: + for p in only_v8: + if p in kwargs: + del kwargs[p] + # 'sshkeys' param expects an urlencoded string if 'sshkeys' in kwargs: urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='') @@ -1207,6 +1219,7 @@ def main(): cicustom=dict(type='str'), cipassword=dict(type='str', no_log=True), citype=dict(type='str', choices=['nocloud', 'configdrive2']), + ciupgrade=dict(type='bool'), ciuser=dict(type='str'), clone=dict(type='str'), cores=dict(type='int'), @@ -1414,6 +1427,7 @@ def main(): cicustom=module.params['cicustom'], cipassword=module.params['cipassword'], citype=module.params['citype'], + ciupgrade=module.params['ciupgrade'], ciuser=module.params['ciuser'], cpulimit=module.params['cpulimit'], cpuunits=module.params['cpuunits'], From c71f662d55d5fb931ab3817a2e627fd357f76ce2 Mon Sep 17 00:00:00 2001 From: Mike Raineri Date: Tue, 29 Oct 2024 13:45:25 -0400 Subject: [PATCH 312/482] Redfish: Added handling for trailing slashes in URIs when extracting member identifiers (#9057) Signed-off-by: Mike Raineri --- .../fragments/9047-redfish-uri-parsing.yml | 2 ++ plugins/module_utils/redfish_utils.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/9047-redfish-uri-parsing.yml diff --git a/changelogs/fragments/9047-redfish-uri-parsing.yml b/changelogs/fragments/9047-redfish-uri-parsing.yml new file mode 100644 index 0000000000..83c9450f44 --- /dev/null +++ b/changelogs/fragments/9047-redfish-uri-parsing.yml @@ -0,0 +1,2 @@ +bugfixes: 
+ - redfish_utils module utils - fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 2dab9d80f2..f795eac6cd 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -695,7 +695,7 @@ class RedfishUtils(object): entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) - log_name = log_svcs_uri.split('/')[-1] + log_name = log_svcs_uri.rstrip('/').split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) @@ -1052,7 +1052,7 @@ class RedfishUtils(object): if 'Drives' in data[u'Links']: for link in data[u'Links'][u'Drives']: drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] + drive_id = drive_id_link.rstrip('/').split('/')[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) @@ -3453,7 +3453,7 @@ class RedfishUtils(object): # Capture list of URIs that match a specified HostInterface resource Id if hostinterface_id: - matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]] + matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]] if hostinterface_id and matching_hostinterface_uris: hostinterface_uri = list.pop(matching_hostinterface_uris) elif hostinterface_id and not matching_hostinterface_uris: @@ -3572,12 +3572,12 @@ class RedfishUtils(object): result = {} if manager is None: if len(self.manager_uris) == 1: - manager = self.manager_uris[0].split('/')[-1] + manager = self.manager_uris[0].rstrip('/').split('/')[-1] elif len(self.manager_uris) > 1: entries = self.get_multi_manager_inventory()['entries'] managers = 
[m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')] if len(managers) == 1: - manager = managers[0].split('/')[-1] + manager = managers[0].rstrip('/').split('/')[-1] else: self.module.fail_json(msg=[ "Multiple managers with ServiceIdentification were found: %s" % str(managers), @@ -3735,7 +3735,7 @@ class RedfishUtils(object): # Matching Storage Subsystem ID with user input self.storage_subsystem_uri = "" for storage_subsystem_uri in self.storage_subsystems_uris: - if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: self.storage_subsystem_uri = storage_subsystem_uri if not self.storage_subsystem_uri: @@ -3763,7 +3763,7 @@ class RedfishUtils(object): # Delete each volume for volume in self.volume_uris: - if volume.split("/")[-1] in volume_ids: + if volume.rstrip('/').split('/')[-1] in volume_ids: response = self.delete_request(self.root_uri + volume) if response['ret'] is False: return response @@ -3797,7 +3797,7 @@ class RedfishUtils(object): # Matching Storage Subsystem ID with user input self.storage_subsystem_uri = "" for storage_subsystem_uri in self.storage_subsystems_uris: - if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: self.storage_subsystem_uri = storage_subsystem_uri if not self.storage_subsystem_uri: From e63c2f54cfa5c456f660fb138403ce223de12f41 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:27:43 +1300 Subject: [PATCH 313/482] gio_mime: add return value version (#9067) * add return value version * add changelog frag --- changelogs/fragments/9067-gio-mime-version.yml | 3 +++ plugins/module_utils/gio_mime.py | 1 + plugins/modules/gio_mime.py | 9 +++++++++ tests/unit/plugins/modules/test_gio_mime.yaml | 17 ++++++++++++++++- 4 files changed, 29 insertions(+), 1 deletion(-) create 
mode 100644 changelogs/fragments/9067-gio-mime-version.yml diff --git a/changelogs/fragments/9067-gio-mime-version.yml b/changelogs/fragments/9067-gio-mime-version.yml new file mode 100644 index 0000000000..9e2fb76082 --- /dev/null +++ b/changelogs/fragments/9067-gio-mime-version.yml @@ -0,0 +1,3 @@ +minor_changes: + - gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067). + - gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067). diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py index e01709487d..132981a339 100644 --- a/plugins/module_utils/gio_mime.py +++ b/plugins/module_utils/gio_mime.py @@ -16,6 +16,7 @@ def gio_mime_runner(module, **kwargs): arg_formats=dict( mime_type=cmd_runner_fmt.as_list(), handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), ), **kwargs ) diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 20ccb22329..587aaec427 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -75,6 +75,12 @@ stderr: returned: failure type: str sample: 'gio: Failed to load info for handler "never-existed.desktop"' +version: + description: Version of gio. 
+ type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -94,6 +100,9 @@ class GioMime(ModuleHelper): def __init_module__(self): self.runner = gio_mime_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) def __run__(self): diff --git a/tests/unit/plugins/modules/test_gio_mime.yaml b/tests/unit/plugins/modules/test_gio_mime.yaml index 75e5554c7c..f71e595701 100644 --- a/tests/unit/plugins/modules/test_gio_mime.yaml +++ b/tests/unit/plugins/modules/test_gio_mime.yaml @@ -13,9 +13,14 @@ changed: true mocks: run_command: - - command: [/testbin/gio, mime, x-scheme-handler/http] + - command: [/testbin/gio, mime, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 + out: "2.80.0\n" + err: "" + - command: [/testbin/gio, mime, x-scheme-handler/http] + environ: *env-def + rc: 0 out: "" err: > No default applications for “x-scheme-handler/http” @@ -35,6 +40,11 @@ skip: test helper does not support check mode yet mocks: run_command: + - command: [/testbin/gio, mime, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: "" - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 @@ -55,6 +65,11 @@ changed: false mocks: run_command: + - command: [/testbin/gio, mime, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: "" - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 From 9553dd9ddf96d728cb0fc43bcc8d3702780a8c01 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 31 Oct 2024 06:49:11 +0100 Subject: [PATCH 314/482] Stop using ansible.module_utils.compat.importlib (#9085) Stop using ansible.module_utils.compat.importlib. 
--- changelogs/fragments/9084-collection_version-importlib.yml | 2 ++ plugins/lookup/collection_version.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9084-collection_version-importlib.yml diff --git a/changelogs/fragments/9084-collection_version-importlib.yml b/changelogs/fragments/9084-collection_version-importlib.yml new file mode 100644 index 0000000000..827b9653d2 --- /dev/null +++ b/changelogs/fragments/9084-collection_version-importlib.yml @@ -0,0 +1,2 @@ +bugfixes: + - "collection_version lookup plugin - use ``importlib`` directly instead of the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib`` (https://github.com/ansible-collections/community.general/pull/9084)." diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 33316fc2b0..0f93c03c26 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -63,11 +63,11 @@ RETURN = """ import json import os import re +from importlib import import_module import yaml from ansible.errors import AnsibleLookupError -from ansible.module_utils.compat.importlib import import_module from ansible.plugins.lookup import LookupBase From 8a2ac4f1ebe9b8da6d3aa4ede77a0f59fd6fa64e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 3 Nov 2024 05:49:41 +1300 Subject: [PATCH 315/482] cpanm: add return value cpanm_version (#9061) * add return value version * add changelog frag * fix indentation * fix RV name and tests * Update plugins/modules/cpanm.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/9061-cpanm-version.yml | 2 + plugins/modules/cpanm.py | 21 ++++ tests/unit/plugins/modules/test_cpanm.yaml | 128 +++++++++++++++++++- 3 files changed, 147 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/9061-cpanm-version.yml diff --git 
a/changelogs/fragments/9061-cpanm-version.yml b/changelogs/fragments/9061-cpanm-version.yml new file mode 100644 index 0000000000..af91cac1c0 --- /dev/null +++ b/changelogs/fragments/9061-cpanm-version.yml @@ -0,0 +1,2 @@ +minor_changes: + - cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061). diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 25489170dd..04b3b06b7f 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -142,7 +142,19 @@ EXAMPLES = """ version: '1.0' """ +RETURN = """ +--- +cpanm_version: + description: Version of CPANMinus. + type: str + returned: always + sample: "1.7047" + version_added: 10.0.0 +""" + + import os +import re from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -175,6 +187,7 @@ class CPANMinus(ModuleHelper): mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), installdeps=cmd_runner_fmt.as_bool("--installdeps"), pkg_spec=cmd_runner_fmt.as_list(), + cpanm_version=cmd_runner_fmt.as_fixed("--version"), ) use_old_vardict = False @@ -191,6 +204,14 @@ class CPANMinus(ModuleHelper): self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) self.vars.binary = self.runner.binary + with self.runner("cpanm_version") as ctx: + rc, out, err = ctx.run() + line = out.split('\n')[0] + match = re.search(r"version\s+([\d\.]+)\s+", line) + if not match: + self.do_raise("Failed to determine version number. 
First line of output: {0}".format(line)) + self.vars.cpanm_version = match.group(1) + def _is_package_installed(self, name, locallib, version): def process(rc, out, err): return rc == 0 diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index ad081254d6..ff4bd9c0f7 100644 --- a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -10,15 +10,23 @@ mode: compatibility output: changed: true + cpanm_version: "1.7047" mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/perl, -le, 'use Dancer;'] environ: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 2 out: "" err: "error, not installed" - command: [/testbin/cpanm, Dancer] - environ: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + environ: *env-def-true rc: 0 out: "" err: "" @@ -30,6 +38,13 @@ changed: false mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/perl, -le, 'use Dancer;'] environ: *env-def-false rc: 0 @@ -42,6 +57,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, Dancer] environ: *env-def-true rc: 0 @@ -55,6 +77,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 
5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -67,6 +96,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -81,6 +117,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -94,6 +137,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, /srv/webapps/my_app/src/] environ: *env-def-true rc: 0 @@ -109,6 +159,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, --notest, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -123,6 +180,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, --mirror, "http://cpan.cpantesters.org/", Dancer] environ: *env-def-true rc: 0 @@ -146,6 +210,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, 
--version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, Dancer~1.0] environ: *env-def-true rc: 0 @@ -160,6 +231,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, Dancer~1.5] environ: *env-def-true rc: 0 @@ -174,6 +252,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, Dancer@1.7] environ: *env-def-true rc: 0 @@ -188,7 +273,14 @@ failed: true msg: parameter 'version' must not be used when installing from a file mocks: - run_command: [] + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - id: install_specific_version_from_directory_error input: from_path: ~/ @@ -198,7 +290,14 @@ failed: true msg: parameter 'version' must not be used when installing from a directory mocks: - run_command: [] + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - id: install_specific_version_from_git_url_explicit input: name: "git://github.com/plack/Plack.git" @@ -208,6 +307,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 
(/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@1.7"] environ: *env-def-true rc: 0 @@ -222,6 +328,13 @@ changed: true mocks: run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@2.5"] environ: *env-def-true rc: 0 @@ -236,4 +349,11 @@ failed: true msg: operator '~' not allowed in version parameter when installing from git repository mocks: - run_command: [] + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: "" From c8410a924eeb9dc8dbaef489ca52e5b74fd09b53 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 3 Nov 2024 05:49:54 +1300 Subject: [PATCH 316/482] opkg: add return value version (#9086) * opkg: add return value version * add changelog frag --- .../fragments/9086-gio-mime-version.yml | 2 ++ plugins/modules/opkg.py | 25 ++++++++++++----- tests/unit/plugins/modules/test_opkg.yaml | 27 ++++++++++++++++++- 3 files changed, 47 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/9086-gio-mime-version.yml diff --git a/changelogs/fragments/9086-gio-mime-version.yml b/changelogs/fragments/9086-gio-mime-version.yml new file mode 100644 index 0000000000..46c3e6cec8 --- /dev/null +++ b/changelogs/fragments/9086-gio-mime-version.yml @@ -0,0 +1,2 @@ +minor_changes: + - opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086). 
diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py index 2f9794ab86..da51755efb 100644 --- a/plugins/modules/opkg.py +++ b/plugins/modules/opkg.py @@ -75,6 +75,7 @@ requirements: - opkg - python ''' + EXAMPLES = ''' - name: Install foo community.general.opkg: @@ -111,6 +112,15 @@ EXAMPLES = ''' force: overwrite ''' +RETURN = """ +version: + description: Version of opkg. + type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + import os from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper @@ -156,10 +166,15 @@ class Opkg(StateModuleHelper): state=cmd_runner_fmt.as_map(state_map), force=cmd_runner_fmt.as_func(_force), update_cache=cmd_runner_fmt.as_bool("update"), + version=cmd_runner_fmt.as_fixed("--version"), ), path_prefix=dir, ) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip().replace("opkg version ", "") + if self.vars.update_cache: rc, dummy, dummy = self.runner("update_cache").run() if rc != 0: @@ -186,13 +201,12 @@ class Opkg(StateModuleHelper): pkg_name, pkg_version = self.split_name_and_version(package) if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall": ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version): self.do_raise("failed to install %s" % package) self.vars.install_c += 1 - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info if self.vars.install_c > 0: - self.vars.msg = "installed %s package(s)" % (self.vars.install_c) + self.vars.msg = "installed %s package(s)" % self.vars.install_c else: self.vars.msg = "package(s) already present" @@ -202,13 +216,12 @@ class Opkg(StateModuleHelper): package, dummy = 
self.split_name_and_version(package) if not self._package_in_desired_state(package, want_installed=False): ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) if not self._package_in_desired_state(package, want_installed=False): self.do_raise("failed to remove %s" % package) self.vars.remove_c += 1 - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info if self.vars.remove_c > 0: - self.vars.msg = "removed %s package(s)" % (self.vars.remove_c) + self.vars.msg = "removed %s package(s)" % self.vars.remove_c else: self.vars.msg = "package(s) already absent" diff --git a/tests/unit/plugins/modules/test_opkg.yaml b/tests/unit/plugins/modules/test_opkg.yaml index 0cef54ac08..090a72c20c 100644 --- a/tests/unit/plugins/modules/test_opkg.yaml +++ b/tests/unit/plugins/modules/test_opkg.yaml @@ -12,11 +12,16 @@ msg: installed 1 package(s) mocks: run_command: - - command: [/testbin/opkg, list-installed, zlib-dev] + - command: [/testbin/opkg, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 out: "" err: "" + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: "" + err: "" - command: [/testbin/opkg, install, zlib-dev] environ: *env-def rc: 0 @@ -42,6 +47,11 @@ msg: package(s) already present mocks: run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: "" + err: "" - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -57,6 +67,11 @@ msg: installed 1 package(s) mocks: run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: "" + err: "" - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -85,6 +100,11 @@ msg: installed 1 package(s) mocks: run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: "" + err: "" - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -115,6 +135,11 @@ msg: installed 
1 package(s) mocks: run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: "" + err: "" - command: [/testbin/opkg, update] environ: *env-def rc: 0 From 2429e228a464e0e409f3045dc640d91453f565a2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 3 Nov 2024 05:50:24 +1300 Subject: [PATCH 317/482] pipx/pipx_info: multiple fixes (#9044) * pipx_info: factored process_list out * pipx_info: no need to pass param to _list * pipx_info: minor adjustment * pipx mod utils: make_process_list parameters * fix test for state=install_all * fix assertions * pipx tests: fix detection of pipx 1.7.0 * pipx: use make_process_output * add testcase * pipx: remove import json * pinned in pipx list is not always there * Update plugins/modules/pipx_info.py Co-authored-by: Felix Fontein * remove ensurepath and --user from pipx install * add changelog frag * Update changelogs/fragments/9044-pipx-fixes.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/9044-pipx-fixes.yml * Update changelogs/fragments/9044-pipx-fixes.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/9044-pipx-fixes.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/9044-pipx-fixes.yml | 7 ++ plugins/module_utils/pipx.py | 40 ++++++++ plugins/modules/pipx.py | 28 ++---- plugins/modules/pipx_info.py | 50 +++------- .../integration/targets/pipx/files/spec.json | 91 ------------------- .../targets/pipx/files/spec.json.license | 3 - tests/integration/targets/pipx/meta/main.yml | 7 ++ tests/integration/targets/pipx/tasks/main.yml | 35 ++++--- .../pipx/tasks/testcase-8809-installall.yml | 40 +++++++- .../targets/pipx/tasks/testcase-8809-pin.yml | 6 +- .../pipx/tasks/testcase-8809-uninjectpkg.yml | 4 +- .../pipx/tasks/testcase-9009-fixglobal.yml | 30 ++++++ 12 files changed, 165 insertions(+), 176 deletions(-) create mode 100644 changelogs/fragments/9044-pipx-fixes.yml delete mode 
100644 tests/integration/targets/pipx/files/spec.json delete mode 100644 tests/integration/targets/pipx/files/spec.json.license create mode 100644 tests/integration/targets/pipx/meta/main.yml create mode 100644 tests/integration/targets/pipx/tasks/testcase-9009-fixglobal.yml diff --git a/changelogs/fragments/9044-pipx-fixes.yml b/changelogs/fragments/9044-pipx-fixes.yml new file mode 100644 index 0000000000..dbf0e3c10d --- /dev/null +++ b/changelogs/fragments/9044-pipx-fixes.yml @@ -0,0 +1,7 @@ +minor_changes: + - pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). + - pipx_info - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). + - pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044). +bugfixes: + - pipx module utils - add missing command line formatter for argument ``spec_metadata`` (https://github.com/ansible-collections/community.general/pull/9044). + - pipx - it was ignoring ``global`` when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044). 
diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 513b9081f6..75b6621c1b 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -6,6 +6,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type + +import json + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt @@ -51,6 +55,7 @@ def pipx_runner(module, command, **kwargs): editable=fmt.as_bool("--editable"), pip_args=fmt.as_opt_eq_val('--pip-args'), suffix=fmt.as_opt_val('--suffix'), + spec_metadata=fmt.as_list(), ) arg_formats["global"] = fmt.as_bool("--global") @@ -63,3 +68,38 @@ def pipx_runner(module, command, **kwargs): **kwargs ) return runner + + +def make_process_list(mod_helper, **kwargs): + def process_list(rc, out, err): + if not out: + return [] + + results = [] + raw_data = json.loads(out) + if kwargs.get("include_raw"): + mod_helper.vars.raw_output = raw_data + + if kwargs["name"]: + if kwargs["name"] in raw_data['venvs']: + data = {kwargs["name"]: raw_data['venvs'][kwargs["name"]]} + else: + data = {} + else: + data = raw_data['venvs'] + + for venv_name, venv in data.items(): + entry = { + 'name': venv_name, + 'version': venv['metadata']['main_package']['package_version'], + 'pinned': venv['metadata']['main_package'].get('pinned'), + } + if kwargs.get("include_injected"): + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + if kwargs.get("include_deps"): + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + results.append(entry) + + return results + + return process_list diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index c317ae8da8..9bde0f180c 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -191,10 +191,8 @@ EXAMPLES = """ """ -import json - from 
ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list from ansible.module_utils.facts.compat import ansible_facts @@ -251,26 +249,14 @@ class PipX(StateModuleHelper): use_old_vardict = False def _retrieve_installed(self): - def process_list(rc, out, err): - if not out: - return {} + name = _make_name(self.vars.name, self.vars.suffix) + output_process = make_process_list(self, include_injected=True, name=name) + installed = self.runner('_list global', output_process=output_process).run() - results = {} - raw_data = json.loads(out) - for venv_name, venv in raw_data['venvs'].items(): - results[venv_name] = { - 'version': venv['metadata']['main_package']['package_version'], - 'injected': {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}, - } - return results - - installed = self.runner('_list', output_process=process_list).run(_list=1) - - if self.vars.name is not None: - name = _make_name(self.vars.name, self.vars.suffix) - app_list = installed.get(name) + if name is not None: + app_list = [app for app in installed if app['name'] == name] if app_list: - return {name: app_list} + return {name: app_list[0]} else: return {} diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 65c0ba552e..33fbad0e5d 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -98,6 +98,15 @@ application: type: dict sample: licenses: "0.6.1" + pinned: + description: + - Whether the installed application is pinned or not. + - When using C(pipx<=1.6.0), this returns C(null). 
+ returned: success + type: bool + sample: + pinned: true + version_added: 10.0.0 raw_output: description: The raw output of the C(pipx list) command, when O(include_raw=true). Used for debugging. @@ -112,10 +121,8 @@ cmd: sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"] """ -import json - from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list from ansible.module_utils.facts.compat import ansible_facts @@ -143,41 +150,10 @@ class PipXInfo(ModuleHelper): self.command = [facts['python']['executable'], '-m', 'pipx'] self.runner = pipx_runner(self.module, self.command) - # self.vars.set('application', self._retrieve_installed(), change=True, diff=True) - def __run__(self): - def process_list(rc, out, err): - if not out: - return [] - - results = [] - raw_data = json.loads(out) - if self.vars.include_raw: - self.vars.raw_output = raw_data - - if self.vars.name: - if self.vars.name in raw_data['venvs']: - data = {self.vars.name: raw_data['venvs'][self.vars.name]} - else: - data = {} - else: - data = raw_data['venvs'] - - for venv_name, venv in data.items(): - entry = { - 'name': venv_name, - 'version': venv['metadata']['main_package']['package_version'] - } - if self.vars.include_injected: - entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} - if self.vars.include_deps: - entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) - results.append(entry) - - return results - - with self.runner('_list global', output_process=process_list) as ctx: - self.vars.application = ctx.run(_list=1) + output_process = make_process_list(self, **self.vars.as_dict()) + with 
self.runner('_list global', output_process=output_process) as ctx: + self.vars.application = ctx.run() self._capture_results(ctx) def _capture_results(self, ctx): diff --git a/tests/integration/targets/pipx/files/spec.json b/tests/integration/targets/pipx/files/spec.json deleted file mode 100644 index 3c85125337..0000000000 --- a/tests/integration/targets/pipx/files/spec.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "pipx_spec_version": "0.1", - "venvs": { - "black": { - "metadata": { - "injected_packages": {}, - "main_package": { - "app_paths": [ - { - "__Path__": "/home/az/.local/pipx/venvs/black/bin/black", - "__type__": "Path" - }, - { - "__Path__": "/home/az/.local/pipx/venvs/black/bin/blackd", - "__type__": "Path" - } - ], - "app_paths_of_dependencies": {}, - "apps": [ - "black", - "blackd" - ], - "apps_of_dependencies": [], - "include_apps": true, - "include_dependencies": false, - "man_pages": [], - "man_pages_of_dependencies": [], - "man_paths": [], - "man_paths_of_dependencies": {}, - "package": "black", - "package_or_url": "black", - "package_version": "24.8.0", - "pinned": false, - "pip_args": [], - "suffix": "" - }, - "pipx_metadata_version": "0.5", - "python_version": "Python 3.11.9", - "source_interpreter": { - "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", - "__type__": "Path" - }, - "venv_args": [] - } - }, - "pycowsay": { - "metadata": { - "injected_packages": {}, - "main_package": { - "app_paths": [ - { - "__Path__": "/home/az/.local/pipx/venvs/pycowsay/bin/pycowsay", - "__type__": "Path" - } - ], - "app_paths_of_dependencies": {}, - "apps": [ - "pycowsay" - ], - "apps_of_dependencies": [], - "include_apps": true, - "include_dependencies": false, - "man_pages": [ - "man6/pycowsay.6" - ], - "man_pages_of_dependencies": [], - "man_paths": [ - { - "__Path__": "/home/az/.local/pipx/venvs/pycowsay/share/man/man6/pycowsay.6", - "__type__": "Path" - } - ], - "man_paths_of_dependencies": {}, - "package": "pycowsay", - "package_or_url": 
"pycowsay", - "package_version": "0.0.0.2", - "pinned": false, - "pip_args": [], - "suffix": "" - }, - "pipx_metadata_version": "0.5", - "python_version": "Python 3.11.9", - "source_interpreter": { - "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", - "__type__": "Path" - }, - "venv_args": [] - } - }, - } -} diff --git a/tests/integration/targets/pipx/files/spec.json.license b/tests/integration/targets/pipx/files/spec.json.license deleted file mode 100644 index a1390a69ed..0000000000 --- a/tests/integration/targets/pipx/files/spec.json.license +++ /dev/null @@ -1,3 +0,0 @@ -Copyright (c) Ansible Project -GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/pipx/meta/main.yml b/tests/integration/targets/pipx/meta/main.yml new file mode 100644 index 0000000000..982de6eb03 --- /dev/null +++ b/tests/integration/targets/pipx/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index 30e96ef1bf..e764f17f68 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -3,10 +3,22 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: install pipx - pip: - name: pipx - extra_args: --user +- name: Determine pipx level + block: + - name: Install pipx>=1.7.0 + pip: + name: pipx>=1.7.0 + - name: Set has_pipx170 fact true + ansible.builtin.set_fact: + has_pipx170: true + rescue: + - name: Set has_pipx170 fact false + ansible.builtin.set_fact: 
+ has_pipx170: false + - name: Install pipx (no version spec) + pip: + name: pipx + ############################################################################## - name: ensure application tox is uninstalled @@ -233,26 +245,21 @@ - name: Include testcase for issue 8656 ansible.builtin.include_tasks: testcase-8656.yml -- name: install pipx - pip: - name: pipx>=1.7.0 - extra_args: --user - ignore_errors: true - register: pipx170_install - - name: Recent features when: - - pipx170_install is not failed - - pipx170_install is changed + - has_pipx170 block: - name: Include testcase for PR 8793 --global ansible.builtin.include_tasks: testcase-8793-global.yml - name: Include testcase for PR 8809 install-all - ansible.builtin.include_tasks: testcase-8809-install-all.yml + ansible.builtin.include_tasks: testcase-8809-installall.yml - name: Include testcase for PR 8809 pin ansible.builtin.include_tasks: testcase-8809-pin.yml - name: Include testcase for PR 8809 injectpkg ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml + + - name: Include testcase for PR 9009 injectpkg --global + ansible.builtin.include_tasks: testcase-9009-fixglobal.yml diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml index 37816247c0..9e770c1a98 100644 --- a/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml +++ b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml @@ -24,10 +24,39 @@ - pycowsay register: uninstall_all_1 + - name: Install pycowsay and black + community.general.pipx: + state: install + name: "{{ item }}" + loop: + - black + - pycowsay + register: install_all_1 + + - name: Generate JSON spec + community.general.pipx_info: + include_raw: true + register: pipx_list + + - name: Copy content + ansible.builtin.copy: + content: "{{ pipx_list.raw_output }}" + dest: "{{ remote_tmp_dir }}/spec.json" + mode: "0644" + + - name: Uninstall pycowsay and black (again) 
+ community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + - name: Use install-all community.general.pipx: - state: install-all - spec_metadata: spec.json + state: install_all + spec_metadata: "{{ remote_tmp_dir }}/spec.json" register: install_all - name: Run pycowsay (should succeed) @@ -47,13 +76,14 @@ loop: - black - pycowsay - register: uninstall_all_2 + register: uninstall_all_3 - name: Assert uninstall-all ansible.builtin.assert: that: - uninstall_all_1 is not changed + - install_all_1 is changed + - uninstall_all_2 is changed - install_all is changed - "'Moooooooo!' in what_the_cow_said.stdout" - - "'/usr/local/bin/pycowsay' in which_cow.stdout" - - uninstall_all_2 is changed + - uninstall_all_3 is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml index 89e4bb9dc6..c25073a719 100644 --- a/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml +++ b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml @@ -60,10 +60,10 @@ - pycowsay register: uninstall_all_2 - - name: Assert uninstall-all + - name: Assert pin/unpin ansible.builtin.assert: that: - pin_cow is changed - - cow_info_1 == "0.0.0.1" + - cow_info_1.application.0.version == "0.0.0.1" - unpin_cow is changed - - cow_info_2 != "0.0.0.1" + - cow_info_2.application.0.version != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml index 89e4bb9dc6..4092d6f244 100644 --- a/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml +++ b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml @@ -64,6 +64,6 @@ ansible.builtin.assert: that: - pin_cow is changed - - cow_info_1 == "0.0.0.1" + - cow_info_1.application.0.version == "0.0.0.1" - unpin_cow is changed - - cow_info_2 != "0.0.0.1" + - 
cow_info_2.application.0.version != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-9009-fixglobal.yml b/tests/integration/targets/pipx/tasks/testcase-9009-fixglobal.yml new file mode 100644 index 0000000000..ffcd2651d0 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-9009-fixglobal.yml @@ -0,0 +1,30 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: 9009-Ensure application pylint is uninstalled + community.general.pipx: + name: pylint + state: absent + global: true + +- name: 9009-Install application pylint + community.general.pipx: + name: pylint + global: true + register: install_pylint + +- name: 9009-Inject packages + community.general.pipx: + state: inject + name: pylint + global: true + inject_packages: + - licenses + +- name: 9009-Ensure application pylint is uninstalled + community.general.pipx: + name: pylint + state: absent + global: true From cecaa1840d907039ebe65bea50bb0383e75893c0 Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Sun, 3 Nov 2024 13:25:39 +0300 Subject: [PATCH 318/482] one_image: Add image creation and timeout (#9075) * Add creation one_image * Add CHANGELOG * PR fix docs * Add doc line for create --- .../fragments/9075-add-creation-oneimage.yml | 3 + plugins/modules/one_image.py | 169 +++++++++++++----- 2 files changed, 124 insertions(+), 48 deletions(-) create mode 100644 changelogs/fragments/9075-add-creation-oneimage.yml diff --git a/changelogs/fragments/9075-add-creation-oneimage.yml b/changelogs/fragments/9075-add-creation-oneimage.yml new file mode 100644 index 0000000000..96420d24ef --- /dev/null +++ b/changelogs/fragments/9075-add-creation-oneimage.yml @@ -0,0 +1,3 @@ +minor_changes: + - one_image - add ``create``, ``template`` and ``datastore_id`` arguments for image creation 
(https://github.com/ansible-collections/community.general/pull/9075). + - one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075). diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py index 5877142cdf..68db40adb4 100644 --- a/plugins/modules/one_image.py +++ b/plugins/modules/one_image.py @@ -20,42 +20,65 @@ extends_documentation_fragment: - community.general.opennebula - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - id: - description: - - A O(id) of the image you would like to manage. - type: int - name: - description: - - A O(name) of the image you would like to manage. - type: str - state: - description: - - V(present) - state that is used to manage the image - - V(absent) - delete the image - - V(cloned) - clone the image - - V(renamed) - rename the image to the O(new_name) - choices: ["present", "absent", "cloned", "renamed"] - default: present - type: str - enabled: - description: - - Whether the image should be enabled or disabled. - type: bool - new_name: - description: - - A name that will be assigned to the existing or new image. - - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'. - type: str - persistent: - description: - - Whether the image should be persistent or non-persistent. - type: bool - version_added: 9.5.0 + id: + description: + - A O(id) of the image you would like to manage. + type: int + name: + description: + - A O(name) of the image you would like to manage. + - Required if O(create=true). + type: str + state: + description: + - V(present) - state that is used to manage the image. + - V(absent) - delete the image. + - V(cloned) - clone the image. + - V(renamed) - rename the image to the O(new_name). 
+ choices: ["present", "absent", "cloned", "renamed"] + default: present + type: str + enabled: + description: + - Whether the image should be enabled or disabled. + type: bool + new_name: + description: + - A name that will be assigned to the existing or new image. + - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'. + type: str + persistent: + description: + - Whether the image should be persistent or non-persistent. + type: bool + version_added: 9.5.0 + create: + description: + - Whether the image should be created if not present. + - This is ignored if O(state=absent). + type: bool + version_added: 10.0.0 + template: + description: + - Use with O(create=true) to specify image template. + type: str + version_added: 10.0.0 + datastore_id: + description: + - Use with O(create=true) to specify datastore for image. + type: int + version_added: 10.0.0 + wait_timeout: + description: + - Seconds to wait until image is ready, deleted or cloned. 
+ type: int + default: 60 + version_added: 10.0.0 author: - "Milan Ilic (@ilicmilan)" ''' @@ -102,6 +125,35 @@ EXAMPLES = ''' community.general.one_image: id: '{{ result.id }}' state: absent + +- name: Make sure IMAGE is present + community.general.one_image: + name: myyy-image + state: present + create: true + datastore_id: 100 + template: | + PATH = "/var/tmp/image" + TYPE = "OS" + SIZE = 20512 + FORMAT = "qcow2" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" + +- name: Make sure IMAGE is present with a longer timeout + community.general.one_image: + name: big-image + state: present + create: true + datastore_id: 100 + wait_timeout: 900 + template: | + PATH = "https://192.0.2.200/repo/tipa_image.raw" + TYPE = "OS" + SIZE = 82048 + FORMAT = "raw" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" ''' RETURN = ''' @@ -328,15 +380,20 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', class ImageModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - id=dict(type='int', required=False), - name=dict(type='str', required=False), + id=dict(type='int'), + name=dict(type='str'), state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), - enabled=dict(type='bool', required=False), - new_name=dict(type='str', required=False), - persistent=dict(type='bool', required=False), + enabled=dict(type='bool'), + new_name=dict(type='str'), + persistent=dict(type='bool'), + create=dict(type='bool'), + template=dict(type='str'), + datastore_id=dict(type='int'), + wait_timeout=dict(type='int', default=60), ) required_if = [ - ['state', 'renamed', ['id']] + ['state', 'renamed', ['id']], + ['create', True, ['template', 'datastore_id', 'name']], ] mutually_exclusive = [ ['id', 'name'], @@ -356,26 +413,32 @@ class ImageModule(OpenNebulaModule): enabled = params.get('enabled') new_name = params.get('new_name') persistent = params.get('persistent') + create = params.get('create') + template = params.get('template') + 
datastore_id = params.get('datastore_id') + wait_timeout = params.get('wait_timeout') self.result = {} image = self.get_image_instance(id, name) if not image and desired_state != 'absent': + if create: + self.result = self.create_image(name, template, datastore_id, wait_timeout) # Using 'if id:' doesn't work properly when id=0 - if id is not None: + elif id is not None: module.fail_json(msg="There is no image with id=" + str(id)) elif name is not None: module.fail_json(msg="There is no image with name=" + name) if desired_state == 'absent': - self.result = self.delete_image(image) + self.result = self.delete_image(image, wait_timeout) else: if persistent is not None: self.result = self.change_persistence(image, persistent) if enabled is not None: self.result = self.enable_image(image, enabled) if desired_state == "cloned": - self.result = self.clone_image(image, new_name) + self.result = self.clone_image(image, new_name, wait_timeout) elif desired_state == "renamed": self.result = self.rename_image(image, new_name) @@ -404,6 +467,16 @@ class ImageModule(OpenNebulaModule): else: return self.get_image_by_name(requested_name) + def create_image(self, image_name, template, datastore_id, wait_timeout): + if not self.module.check_mode: + image_id = self.one.image.allocate("NAME = \"" + image_name + "\"\n" + template, datastore_id) + self.wait_for_ready(image_id, wait_timeout) + image = self.get_image_by_id(image_id) + result = self.get_image_info(image) + + result['changed'] = True + return result + def wait_for_ready(self, image_id, wait_timeout=60): import time start_time = time.time() @@ -491,7 +564,7 @@ class ImageModule(OpenNebulaModule): return result - def clone_image(self, image, new_name): + def clone_image(self, image, new_name, wait_timeout): if new_name is None: new_name = "Copy of " + image.NAME @@ -506,7 +579,7 @@ class ImageModule(OpenNebulaModule): if not self.module.check_mode: new_id = self.one.image.clone(image.ID, new_name) - 
self.wait_for_ready(new_id) + self.wait_for_ready(new_id, wait_timeout) image = self.one.image.info(new_id) result = self.get_image_info(image) @@ -534,7 +607,7 @@ class ImageModule(OpenNebulaModule): result['changed'] = True return result - def delete_image(self, image): + def delete_image(self, image, wait_timeout): if not image: return {'changed': False} @@ -543,7 +616,7 @@ class ImageModule(OpenNebulaModule): if not self.module.check_mode: self.one.image.delete(image.ID) - self.wait_for_delete(image.ID) + self.wait_for_delete(image.ID, wait_timeout) return {'changed': True} From 8fc11fe88f8093d5304cb88ae80d57d930fe1527 Mon Sep 17 00:00:00 2001 From: witrdotnet Date: Sun, 3 Nov 2024 11:25:59 +0100 Subject: [PATCH 319/482] keycloak_clientscope_type fix checkmode (#9093) * fix check_mode on set keycloak client scope type (#9092) * add changelog fragment (#9092) * update changelog fragment (#9092) * compact code: make one line conditions with list comprehension and any() Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * fix syntax error: remove extra ')' * fix changelog fragment type Co-authored-by: Felix Fontein * add issue's link in changelog fragment Co-authored-by: Felix Fontein --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- ...keycloak-clientscope-type-fix-check-mode.yml | 2 ++ plugins/modules/keycloak_clientscope_type.py | 17 +++++++---------- 2 files changed, 9 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml diff --git a/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml b/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml new file mode 100644 index 0000000000..b51eb24136 --- /dev/null +++ b/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientscope_type - fix detect changes in check mode 
(https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093). diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py index 37a5d3be94..0c919afdad 100644 --- a/plugins/modules/keycloak_clientscope_type.py +++ b/plugins/modules/keycloak_clientscope_type.py @@ -246,15 +246,19 @@ def main(): if module._diff: result['diff'] = dict(before=result['existing'], after=result['proposed']) - if module.check_mode: - module.exit_json(**result) - default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real) optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real) + result["changed"] = any(len(x) > 0 for x in [ + default_clientscopes_add, optional_clientscopes_add, default_clientscopes_delete, optional_clientscopes_delete + ]) + + if module.check_mode: + module.exit_json(**result) + # first delete so clientscopes can change type for clientscope in default_clientscopes_delete: kc.delete_default_clientscope(clientscope['id'], realm, client_id) @@ -266,13 +270,6 @@ def main(): for clientscope in optional_clientscopes_add: kc.add_optional_clientscope(clientscope['id'], realm, client_id) - result["changed"] = ( - len(default_clientscopes_add) > 0 - or len(optional_clientscopes_add) > 0 - or len(default_clientscopes_delete) > 0 - or len(optional_clientscopes_delete) > 0 - ) - result['end_state'].update({ 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)), 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id)) From 3d03c373ff975108e8a66b4e4f85171eaa327f47 Mon Sep 17 00:00:00 2001 
From: Connor Newton Date: Sun, 3 Nov 2024 10:31:32 +0000 Subject: [PATCH 320/482] jenkins_node: Add set offline message (#9084) * jenkins_node: Add set offline message * Implement offline_message parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline). * Fix enabled, disable and absent node state redirect authorization issues, same as was present for present. * Add unit tests for redirect authorization workarounds. * * Make docs clearer re: offline_message behaviour * Exit with fail_json() instead of raising when create/delete/enable/disable node fail. * * Add changelog fragment * Update changelog fragments. --------- Co-authored-by: Felix Fontein --- .../9084-jenkins_node-add-offline-message.yml | 8 + plugins/modules/jenkins_node.py | 155 ++++++++-- .../unit/plugins/modules/test_jenkins_node.py | 276 +++++++++++++++++- 3 files changed, 411 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/9084-jenkins_node-add-offline-message.yml diff --git a/changelogs/fragments/9084-jenkins_node-add-offline-message.yml b/changelogs/fragments/9084-jenkins_node-add-offline-message.yml new file mode 100644 index 0000000000..3718127513 --- /dev/null +++ b/changelogs/fragments/9084-jenkins_node-add-offline-message.yml @@ -0,0 +1,8 @@ +minor_changes: + - jenkins_node - add ``offline_message`` parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084)." + +bugfixes: + - jenkins_node - fixed ``enabled``, ``disable`` and ``absent`` node state redirect authorization issues, same as was present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084). + +known_issues: + - jenkins_node - the module is not able to update offline message when node is already offline due to internally using toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084). 
diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py index 2ee4a481a5..9406eab4c5 100644 --- a/plugins/modules/jenkins_node.py +++ b/plugins/modules/jenkins_node.py @@ -64,6 +64,17 @@ options: - When specified, sets the Jenkins node labels. type: list elements: str + offline_message: + description: + - Specifies the offline reason message to be set when configuring the Jenkins node + state. + - If O(offline_message) is given and requested O(state) is not V(disabled), an + error will be raised. + - Internally O(offline_message) is set via the V(toggleOffline) API, so updating + the message when the node is already offline (current state V(disabled)) is not + possible. In this case, a warning will be issued. + type: str + version_added: 10.0.0 ''' EXAMPLES = ''' @@ -89,6 +100,13 @@ EXAMPLES = ''' - label-1 - label-2 - label-3 + +- name: Set Jenkins node offline with offline message. + community.general.jenkins_node: + name: my-node + state: disabled + offline_message: > + This node is offline for some reason. 
''' RETURN = ''' @@ -136,7 +154,8 @@ configured: ''' import sys -from xml.etree import ElementTree +import traceback +from xml.etree import ElementTree as et from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native @@ -164,6 +183,13 @@ class JenkinsNode: self.url = module.params['url'] self.num_executors = module.params['num_executors'] self.labels = module.params['labels'] + self.offline_message = module.params['offline_message'] # type: str | None + + if self.offline_message is not None: + self.offline_message = self.offline_message.strip() + + if self.state != "disabled": + self.module.fail_json("can not set offline message when state is not disabled") if self.labels is not None: for label in self.labels: @@ -207,12 +233,12 @@ class JenkinsNode: configured = False data = self.instance.get_node_config(self.name) - root = ElementTree.fromstring(data) + root = et.fromstring(data) if self.num_executors is not None: elem = root.find('numExecutors') if elem is None: - elem = ElementTree.SubElement(root, 'numExecutors') + elem = et.SubElement(root, 'numExecutors') if elem.text is None or int(elem.text) != self.num_executors: elem.text = str(self.num_executors) configured = True @@ -220,7 +246,7 @@ class JenkinsNode: if self.labels is not None: elem = root.find('label') if elem is None: - elem = ElementTree.SubElement(root, 'label') + elem = et.SubElement(root, 'label') labels = [] if elem.text: labels = elem.text.split() @@ -230,9 +256,9 @@ class JenkinsNode: if configured: if IS_PYTHON_2: - data = ElementTree.tostring(root) + data = et.tostring(root) else: - data = ElementTree.tostring(root, encoding="unicode") + data = et.tostring(root, encoding="unicode") self.instance.reconfig_node(self.name, data) @@ -240,16 +266,24 @@ class JenkinsNode: if configured: self.result['changed'] = True - def present_node(self): + def present_node(self, configure=True): # type: (bool) -> bool + """Assert node present. 
+ + Args: + configure: If True, run node configuration after asserting node present. + + Returns: + True if the node is present, False otherwise (i.e. is check mode). + """ def create_node(): try: self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH) except jenkins.JenkinsException as e: # Some versions of python-jenkins < 1.8.3 has an authorization bug when - # handling redirects returned when posting new resources. If the node is + # handling redirects returned when posting to resources. If the node is # created OK then can ignore the error. if not self.instance.node_exists(self.name): - raise e + self.module.fail_json(msg="Create node failed: %s" % to_native(e), exception=traceback.format_exc()) # TODO: Remove authorization workaround. self.result['warnings'].append( @@ -265,7 +299,8 @@ class JenkinsNode: created = True - self.configure_node(present) + if configure: + self.configure_node(present) self.result['created'] = created if created: @@ -279,10 +314,10 @@ class JenkinsNode: self.instance.delete_node(self.name) except jenkins.JenkinsException as e: # Some versions of python-jenkins < 1.8.3 has an authorization bug when - # handling redirects returned when posting new resources. If the node is + # handling redirects returned when posting to resources. If the node is # deleted OK then can ignore the error. if self.instance.node_exists(self.name): - raise e + self.module.fail_json(msg="Delete node failed: %s" % to_native(e), exception=traceback.format_exc()) # TODO: Remove authorization workaround. 
self.result['warnings'].append( @@ -302,16 +337,36 @@ class JenkinsNode: self.result['changed'] = True def enabled_node(self): + def get_offline(): # type: () -> bool + return self.instance.get_node_info(self.name)["offline"] + present = self.present_node() enabled = False if present: - info = self.instance.get_node_info(self.name) - - if info['offline']: - if not self.module.check_mode: + def enable_node(): + try: self.instance.enable_node(self.name) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # disabled OK then can ignore the error. + offline = get_offline() + + if offline: + self.module.fail_json(msg="Enable node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node enabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + offline = get_offline() + + if offline: + if not self.module.check_mode: + enable_node() enabled = True else: @@ -326,18 +381,63 @@ class JenkinsNode: self.result['changed'] = True def disabled_node(self): - present = self.present_node() - - disabled = False - - if present: + def get_offline_info(): info = self.instance.get_node_info(self.name) - if not info['offline']: + offline = info["offline"] + offline_message = info["offlineCauseReason"] + + return offline, offline_message + + # Don't configure until after disabled, in case the change in configuration + # causes the node to pick up a job. + present = self.present_node(False) + + disabled = False + changed = False + + if present: + offline, offline_message = get_offline_info() + + if self.offline_message is not None and self.offline_message != offline_message: + if offline: + # n.b. Internally disable_node uses toggleOffline gated by a not + # offline condition. 
This means that disable_node can not be used to + # update an offline message if the node is already offline. + # + # Toggling the node online to set the message when toggling offline + # again is not an option as during this transient online time jobs + # may be scheduled on the node which is not acceptable. + self.result["warnings"].append( + "unable to change offline message when already offline" + ) + else: + offline_message = self.offline_message + changed = True + + def disable_node(): + try: + self.instance.disable_node(self.name, offline_message) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # disabled OK then can ignore the error. + offline, _offline_message = get_offline_info() + + if not offline: + self.module.fail_json(msg="Disable node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node disabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + if not offline: if not self.module.check_mode: - self.instance.disable_node(self.name) + disable_node() disabled = True + else: # Would have created node with initial state enabled therefore would have # needed to disable therefore disabled. 
@@ -345,10 +445,16 @@ class JenkinsNode: raise Exception("disabled_node present is False outside of check mode") disabled = True - self.result['disabled'] = disabled if disabled: + changed = True + + self.result['disabled'] = disabled + + if changed: self.result['changed'] = True + self.configure_node(present) + def main(): module = AnsibleModule( @@ -360,6 +466,7 @@ def main(): state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'), num_executors=dict(type='int'), labels=dict(type='list', elements='str'), + offline_message=dict(type='str'), ), supports_check_mode=True, ) diff --git a/tests/unit/plugins/modules/test_jenkins_node.py b/tests/unit/plugins/modules/test_jenkins_node.py index 33e7ca0f13..7c2634744d 100644 --- a/tests/unit/plugins/modules/test_jenkins_node.py +++ b/tests/unit/plugins/modules/test_jenkins_node.py @@ -207,6 +207,47 @@ def test_state_present_when_absent_check_mode(get_instance, instance, state): assert result.value["changed"] is True +@mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) +def test_state_present_when_absent_redirect_auth_error_handled( + get_instance, instance, state +): + instance.node_exists.side_effect = [False, True] + instance.get_node_config.return_value = "" + instance.create_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": state, + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) + + assert result.value["created"] is True + assert result.value["changed"] is True + + +@mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) +def test_state_present_when_absent_other_error_raised(get_instance, instance, state): + instance.node_exists.side_effect = [False, False] + instance.get_node_config.return_value = "" + instance.create_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": 
"my-node", + "state": state, + }) + + with raises(AnsibleFailJson) as result: + jenkins_node.main() + + assert instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) + + assert "Create node failed" in str(result.value) + + def test_state_present_when_present(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" @@ -262,6 +303,43 @@ def test_state_absent_when_present_check_mode(get_instance, instance): assert result.value["changed"] is True +def test_state_absent_when_present_redirect_auth_error_handled(get_instance, instance): + instance.node_exists.side_effect = [True, False] + instance.get_node_config.return_value = "" + instance.delete_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "absent", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.delete_node.call_args == call("my-node") + + assert result.value["deleted"] is True + assert result.value["changed"] is True + + +def test_state_absent_when_present_other_error_raised(get_instance, instance): + instance.node_exists.side_effect = [True, True] + instance.get_node_config.return_value = "" + instance.delete_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "absent", + }) + + with raises(AnsibleFailJson) as result: + jenkins_node.main() + + assert instance.delete_node.call_args == call("my-node") + + assert "Delete node failed" in str(result.value) + + def test_state_absent_when_absent(get_instance, instance): instance.node_exists.return_value = False instance.get_node_config.return_value = "" @@ -319,6 +397,45 @@ def test_state_enabled_when_offline_check_mode(get_instance, instance): assert result.value["changed"] is True +def test_state_enabled_when_offline_redirect_auth_error_handled(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" 
+ instance.get_node_info.side_effect = [{"offline": True}, {"offline": False}] + instance.enable_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "enabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.enable_node.call_args == call("my-node") + + assert result.value["enabled"] is True + assert result.value["changed"] is True + + +def test_state_enabled_when_offline_other_error_raised(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.side_effect = [{"offline": True}, {"offline": True}] + instance.enable_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "enabled", + }) + + with raises(AnsibleFailJson) as result: + jenkins_node.main() + + assert instance.enable_node.call_args == call("my-node") + + assert "Enable node failed" in str(result.value) + + def test_state_enabled_when_not_offline(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" @@ -341,7 +458,10 @@ def test_state_enabled_when_not_offline(get_instance, instance): def test_state_disabled_when_not_offline(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - instance.get_node_info.return_value = {"offline": False} + instance.get_node_info.return_value = { + "offline": False, + "offlineCauseReason": "", + } set_module_args({ "name": "my-node", @@ -351,16 +471,78 @@ def test_state_disabled_when_not_offline(get_instance, instance): with raises(AnsibleExitJson) as result: jenkins_node.main() - assert instance.disable_node.call_args == call("my-node") + assert instance.disable_node.call_args == call("my-node", "") assert result.value["disabled"] is True assert result.value["changed"] is True +def test_state_disabled_when_not_offline_redirect_auth_error_handled( + 
get_instance, instance +): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.side_effect = [ + { + "offline": False, + "offlineCauseReason": "", + }, + { + "offline": True, + "offlineCauseReason": "", + }, + ] + instance.disable_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "disabled", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.disable_node.call_args == call("my-node", "") + + assert result.value["disabled"] is True + assert result.value["changed"] is True + + +def test_state_disabled_when_not_offline_other_error_raised(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.side_effect = [ + { + "offline": False, + "offlineCauseReason": "", + }, + { + "offline": False, + "offlineCauseReason": "", + }, + ] + instance.disable_node.side_effect = jenkins.JenkinsException + + set_module_args({ + "name": "my-node", + "state": "disabled", + }) + + with raises(AnsibleFailJson) as result: + jenkins_node.main() + + assert instance.disable_node.call_args == call("my-node", "") + + assert "Disable node failed" in str(result.value) + + def test_state_disabled_when_not_offline_check_mode(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - instance.get_node_info.return_value = {"offline": False} + instance.get_node_info.return_value = { + "offline": False, + "offlineCauseReason": "", + } set_module_args({ "name": "my-node", @@ -380,7 +562,10 @@ def test_state_disabled_when_not_offline_check_mode(get_instance, instance): def test_state_disabled_when_offline(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - instance.get_node_info.return_value = {"offline": True} + instance.get_node_info.return_value = { + "offline": True, 
+ "offlineCauseReason": "", + } set_module_args({ "name": "my-node", @@ -573,3 +758,86 @@ def test_configure_labels_fail_when_contains_space(get_instance, instance): jenkins_node.main() assert not instance.reconfig_node.called + + +@mark.parametrize(["state"], [param(state) for state in ["enabled", "present", "absent"]]) +def test_raises_error_if_offline_message_when_state_not_disabled(get_instance, instance, state): + set_module_args({ + "name": "my-node", + "state": state, + "offline_message": "This is a message...", + }) + + with raises(AnsibleFailJson): + jenkins_node.main() + + assert not instance.disable_node.called + + +def test_set_offline_message_when_equal(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = { + "offline": True, + "offlineCauseReason": "This is an old message...", + } + + set_module_args({ + "name": "my-node", + "state": "disabled", + "offline_message": "This is an old message...", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.disable_node.called + + assert result.value["changed"] is False + + +def test_set_offline_message_when_not_equal_not_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = { + "offline": False, + "offlineCauseReason": "This is an old message...", + } + + set_module_args({ + "name": "my-node", + "state": "disabled", + "offline_message": "This is a new message...", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert instance.disable_node.call_args == call("my-node", "This is a new message...") + + assert result.value["changed"] is True + + +# Not calling disable_node when already offline seems like a sensible thing to do. +# However, we need to call disable_node to set the offline message, so check that +# we do even when already offline. 
+def test_set_offline_message_when_not_equal_offline(get_instance, instance): + instance.node_exists.return_value = True + instance.get_node_config.return_value = "" + instance.get_node_info.return_value = { + "offline": True, + "offlineCauseReason": "This is an old message...", + } + + set_module_args({ + "name": "my-node", + "state": "disabled", + "offline_message": "This is a new message...", + }) + + with raises(AnsibleExitJson) as result: + jenkins_node.main() + + assert not instance.disable_node.called + + assert result.value["changed"] is False From 94e3635c0aa7e40d59e677988441b0c32ccc7ef9 Mon Sep 17 00:00:00 2001 From: bluikko <14869000+bluikko@users.noreply.github.com> Date: Sun, 3 Nov 2024 20:55:16 +0700 Subject: [PATCH 321/482] mattermost: add support for message priority (#9087) * mattermost: add support for message priority * Add changelog fragment * Consistency nit in changelog Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Validate priority arg and doc fixes Validate the two possible priorities with choices. Add priority arg to one example and add version_added field for the arg docs. * Update changelog. --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- changelogs/fragments/9087-mattermost-priority.yaml | 2 ++ plugins/modules/mattermost.py | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9087-mattermost-priority.yaml diff --git a/changelogs/fragments/9087-mattermost-priority.yaml b/changelogs/fragments/9087-mattermost-priority.yaml new file mode 100644 index 0000000000..f66d4189cc --- /dev/null +++ b/changelogs/fragments/9087-mattermost-priority.yaml @@ -0,0 +1,2 @@ +minor_changes: + - mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, https://github.com/ansible-collections/community.general/pull/9087). 
diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py index 154040a8fd..af8ce69600 100644 --- a/plugins/modules/mattermost.py +++ b/plugins/modules/mattermost.py @@ -62,13 +62,19 @@ options: username: type: str description: - - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc. + - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc). default: Ansible icon_url: type: str description: - URL for the message sender's icon. default: https://docs.ansible.com/favicon.ico + priority: + type: str + description: + - Set a priority for the message. + choices: [ important, urgent ] + version_added: 10.0.0 validate_certs: description: - If V(false), SSL certificates will not be validated. This should only be used @@ -92,6 +98,7 @@ EXAMPLES = """ channel: notifications username: 'Ansible on {{ inventory_hostname }}' icon_url: http://www.example.com/some-image-file.png + priority: important - name: Send attachments message via Mattermost community.general.mattermost: @@ -135,6 +142,7 @@ def main(): channel=dict(type='str', default=None), username=dict(type='str', default='Ansible'), icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + priority=dict(type='str', default=None, choices=['important', 'urgent']), validate_certs=dict(default=True, type='bool'), attachments=dict(type='list', elements='dict'), ), @@ -154,6 +162,8 @@ def main(): for param in ['text', 'channel', 'username', 'icon_url', 'attachments']: if module.params[param] is not None: payload[param] = module.params[param] + if module.params['priority'] is not None: + payload['priority'] = {'priority': module.params['priority']} payload = module.jsonify(payload) result['payload'] = payload From 886d4a6596c3e99f4d9351ec4913ce95a2d8d74b Mon Sep 17 00:00:00 2001 From: Mikhail Vorontsov <52924343+mephs@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:02:34 
+0300 Subject: [PATCH 322/482] proxmox inventory: fix urllib3 InsecureRequestWarnings not suppressing when a token is used (#9099) * proxmox inventory: fix urllib3 InsecureRequestWarnings not suppressing when a token is used * proxmox inventory: add changelog fragment * proxmox inventory: add forgotten pr number * Update changelog. --------- Co-authored-by: Felix Fontein --- changelogs/fragments/9099-proxmox-fix-insecure.yml | 2 ++ plugins/inventory/proxmox.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/9099-proxmox-fix-insecure.yml diff --git a/changelogs/fragments/9099-proxmox-fix-insecure.yml b/changelogs/fragments/9099-proxmox-fix-insecure.yml new file mode 100644 index 0000000000..b277a0f933 --- /dev/null +++ b/changelogs/fragments/9099-proxmox-fix-insecure.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). 
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index d7e2107719..3ce4f789a3 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -276,16 +276,18 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _get_auth(self): + validate_certs = self.get_option('validate_certs') + + if validate_certs is False: + from requests.packages.urllib3 import disable_warnings + disable_warnings() + if self.proxmox_password: credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) a = self._get_session() - if a.verify is False: - from requests.packages.urllib3 import disable_warnings - disable_warnings() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) json = ret.json() From 187910df517c881a99d0f06abd197dca3c19794c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 4 Nov 2024 19:11:15 +0100 Subject: [PATCH 323/482] Clean up repo; new features will go into 10.1.0. 
--- changelogs/changelog.yaml | 2 +- .../8051-Redfish-Wait-For-Service.yml | 3 --- ...udosu-not-working-on-some-BSD-machines.yml | 2 -- .../8402-add-diif-mode-openbsd-pkg.yml | 2 -- ...8403-fix-typeerror-in-keycloak-client.yaml | 2 -- .../fragments/8404-ipa_dnsrecord_sshfp.yml | 2 -- .../8405-gitlab-remove-basic-auth.yml | 2 -- .../8406-fix-homebrew-cask-warning.yaml | 2 -- .../fragments/8411-locale-gen-vardict.yml | 11 --------- changelogs/fragments/8413-galaxy-refactor.yml | 2 -- .../fragments/8415-cmd-runner-stack.yml | 2 -- ...ign-auth-flow-by-name-keycloak-client.yaml | 2 -- ...en-using-logs-with-uri-or-slurp-tasks.yaml | 3 --- changelogs/fragments/8431-galaxy-upgrade.yml | 2 -- .../8440-allow-api-port-specification.yaml | 2 -- .../8444-fix-redfish-gen2-upgrade.yaml | 2 -- .../fragments/8452-git_config-absent.yml | 2 -- .../8453-git_config-deprecate-read.yml | 3 --- .../fragments/8464-redis-add-cluster-info.yml | 2 -- .../8471-proxmox-vm-info-network.yml | 2 -- .../8476-launchd-check-mode-changed.yaml | 2 -- .../fragments/8479-cmdrunner-improvements.yml | 4 ---- .../8480-directory-feature-cargo.yml | 2 -- ...ula-inventory-crash-when-nic-has-no-ip.yml | 2 -- ...ycloak_clientscope-add-normalizations.yaml | 2 -- changelogs/fragments/8497-crypt.yml | 3 --- .../fragments/8508-virtualbox-inventory.yml | 3 --- changelogs/fragments/8512-as-bool-not.yml | 2 -- changelogs/fragments/8514-pacman-empty.yml | 2 -- .../8516-proxmox-template-refactor.yml | 2 -- .../fragments/8517-cmd-runner-lang-auto.yml | 2 -- ...8532-expand-opennuebula-inventory-data.yml | 2 -- .../fragments/8533-add-ciphers-option.yml | 4 ---- .../8542-fix-proxmox-volume-handling.yml | 5 ---- ...cloak-clientscope-remove-id-on-compare.yml | 2 -- .../fragments/8557-fix-bug-with-bitwarden.yml | 2 -- .../8613-redfish_utils-language.yaml | 2 -- .../8614-nsupdate-index-out-of-range.yml | 2 -- changelogs/fragments/8623-become-types.yml | 2 -- changelogs/fragments/8624-cache-types.yml | 2 -- 
changelogs/fragments/8625-inventory-types.yml | 2 -- changelogs/fragments/8626-lookup-types.yml | 2 -- .../fragments/8627-connection-types.yml | 2 -- changelogs/fragments/8628-callback-types.yml | 2 -- .../8632-pkgng-add-option-use_globs.yml | 2 -- .../8646-fix-bug-in-proxmox-volumes.yml | 4 ---- .../8648-fix-gitlab-runner-paused.yaml | 2 -- .../8652-Redfish-Password-Change-Required.yml | 2 -- .../fragments/8654-add-redis-tls-params.yml | 2 -- ...8674-add-gitlab-project-cleanup-policy.yml | 3 --- .../fragments/8675-pipx-install-suffix.yml | 2 -- .../fragments/8679-fix-cloudflare-srv.yml | 2 -- .../fragments/8682-locale-gen-multiple.yaml | 2 -- .../8688-gitlab_project-add-new-params.yml | 4 ---- .../8689-passwordstore-lock-naming.yml | 2 -- ...eycloak_user_federation-mapper-removal.yml | 2 -- .../8708-homebrew_cask-fix-upgrade-all.yml | 2 -- .../fragments/8711-gconftool2-refactor.yml | 2 -- .../fragments/8713-proxmox_lxc_interfaces.yml | 2 -- .../8719-openiscsi-add-multiple-targets.yaml | 2 -- ...r-get-cleartext-secret-from-realm-info.yml | 2 -- .../8738-limit-packages-for-copr.yml | 2 -- .../8741-fix-opentelemetry-callback.yml | 2 -- .../8759-gitlab_project-sort-params.yml | 2 -- ...gitlab_project-add-issues-access-level.yml | 2 -- ...sort-desired-and-after-mappers-by-name.yml | 2 -- ...federation-fix-key-error-when-updating.yml | 2 -- ..._federation-make-mapper-removal-optout.yml | 2 -- changelogs/fragments/8766-mh-deco-improve.yml | 3 --- .../8776-mute-vardict-deprecation.yml | 3 --- ...lAttribute-to-empty-string-if-missing.yaml | 2 -- ...t-fix-cleanup-policy-on-project-create.yml | 3 --- .../8791-mh-cause-changes-param-depr.yml | 4 ---- changelogs/fragments/8793-pipx-global.yml | 12 ---------- ...4-Fixing-possible-concatination-error.yaml | 2 -- .../8796-gitlab-access-token-check-mode.yml | 3 --- changelogs/fragments/8809-pipx-new-params.yml | 2 -- ...emove-lastSync-param-from-kc-responses.yml | 2 -- .../fragments/8814-dict-comprehension.yml | 23 
------------------- .../fragments/8822-dict-comprehension.yml | 21 ----------------- .../fragments/8823-keycloak-realm-key.yml | 2 -- ...1-fix-error-when-mapper-id-is-provided.yml | 2 -- .../fragments/8833-dict-comprehension.yml | 23 ------------------- .../fragments/8855-gio_mime_vardict.yml | 2 -- changelogs/fragments/8856-jira_vardict.yml | 2 -- .../fragments/8858-dict-comprehension.yml | 11 --------- changelogs/fragments/8876-dict-items-loop.yml | 16 ------------- ...lm-sort-lists-before-change-detection.yaml | 2 -- .../fragments/8885-add-force-flag-for-nmp.yml | 2 -- .../fragments/8887-fix-one_service-unique.yml | 2 -- .../8889-refactor-one-image-modules.yml | 6 ----- .../fragments/8895-fix-comprehension.yaml | 2 -- .../8897-nmcli-add-reload-and-up-down.yml | 3 --- ...ude-bind-credential-from-change-check.yaml | 2 -- .../8900-ipa-hostgroup-fix-states.yml | 2 -- changelogs/fragments/8907-fix-one-host-id.yml | 2 -- .../8908-add-gitlab-group-params.yml | 2 -- .../8909-flatpak-improve-name-parsing.yaml | 2 -- .../fragments/8917-proxmox-clean-auth.yml | 2 -- .../fragments/8920-ipa-host-fix-state.yml | 2 -- ...pty-response-when-fetching-userprofile.yml | 2 -- changelogs/fragments/8925-atomic.yml | 6 ----- .../fragments/8928-cmd-runner-10.0.0.yml | 2 -- .../fragments/8929-cmd_runner-bugfix.yml | 2 -- ...add-StorageId-RedfishURI-to-disk-facts.yml | 2 -- ...8940-keycloak_userprofile-improve-diff.yml | 2 -- .../fragments/8944-django-command-fix.yml | 3 --- ...ord-store-lookup-create-subkey-support.yml | 2 -- ...user-federation-add-referral-parameter.yml | 2 -- ...ytes-from-the-required-parameters_list.yml | 2 -- .../8964-cmd-runner-argformat-refactor.yml | 2 -- .../fragments/8966-dig-add-port-option.yml | 4 ---- .../8970-fix-dig-multi-nameservers.yml | 2 -- .../8973-keycloak_client-add-x509-auth.yml | 2 -- .../8979-keycloak_group-fix-subgroups.yml | 2 -- changelogs/fragments/8987-legacycrypt.yml | 3 --- .../8989-github-app-token-from-fact.yml | 2 -- 
changelogs/fragments/8990.yml | 3 --- .../9010-edit-gitlab-label-color.yaml | 2 -- ...ton-requires-a-job-initiated-at-reboot.yml | 4 ---- changelogs/fragments/9019-onevnet-bugfix.yml | 2 -- .../fragments/9022-improve-homebrew-perf.yml | 2 -- .../fragments/9026-consul_kv-datacenter.yml | 2 -- ...upport-organizations-in-keycloak-realm.yml | 2 -- ...8-bitwarden-secrets-manager-syntax-fix.yml | 2 -- changelogs/fragments/9044-pipx-fixes.yml | 7 ------ .../fragments/9047-redfish-uri-parsing.yml | 2 -- changelogs/fragments/9052-modprobe-bugfix.yml | 2 -- .../fragments/9056-fix-one_image-modules.yml | 3 --- ...redfish_command-updateuseraccounttypes.yml | 2 -- .../9060-ansible-galaxy-install-version.yml | 2 -- changelogs/fragments/9061-cpanm-version.yml | 2 -- changelogs/fragments/9063-django-version.yml | 5 ---- .../fragments/9064-gconftool2-version.yml | 4 ---- .../fragments/9066-proxmox-kvm-ciupgrade.yml | 2 -- .../fragments/9067-gio-mime-version.yml | 3 --- .../fragments/9075-add-creation-oneimage.yml | 3 --- .../9084-collection_version-importlib.yml | 2 -- .../9084-jenkins_node-add-offline-message.yml | 8 ------- .../fragments/9086-gio-mime-version.yml | 2 -- .../fragments/9087-mattermost-priority.yaml | 2 -- ...ycloak-clientscope-type-fix-check-mode.yml | 2 -- .../fragments/9099-proxmox-fix-insecure.yml | 2 -- changelogs/fragments/deprecate-hipchat.yml | 2 -- changelogs/fragments/deprecations.yml | 8 ------- changelogs/fragments/removals.yml | 10 -------- galaxy.yml | 2 +- 147 files changed, 2 insertions(+), 467 deletions(-) delete mode 100644 changelogs/fragments/8051-Redfish-Wait-For-Service.yml delete mode 100644 changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml delete mode 100644 changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml delete mode 100644 changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml delete mode 100644 changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml delete mode 100644 
changelogs/fragments/8405-gitlab-remove-basic-auth.yml delete mode 100644 changelogs/fragments/8406-fix-homebrew-cask-warning.yaml delete mode 100644 changelogs/fragments/8411-locale-gen-vardict.yml delete mode 100644 changelogs/fragments/8413-galaxy-refactor.yml delete mode 100644 changelogs/fragments/8415-cmd-runner-stack.yml delete mode 100644 changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml delete mode 100644 changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml delete mode 100644 changelogs/fragments/8431-galaxy-upgrade.yml delete mode 100644 changelogs/fragments/8440-allow-api-port-specification.yaml delete mode 100644 changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml delete mode 100644 changelogs/fragments/8452-git_config-absent.yml delete mode 100644 changelogs/fragments/8453-git_config-deprecate-read.yml delete mode 100644 changelogs/fragments/8464-redis-add-cluster-info.yml delete mode 100644 changelogs/fragments/8471-proxmox-vm-info-network.yml delete mode 100644 changelogs/fragments/8476-launchd-check-mode-changed.yaml delete mode 100644 changelogs/fragments/8479-cmdrunner-improvements.yml delete mode 100644 changelogs/fragments/8480-directory-feature-cargo.yml delete mode 100644 changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml delete mode 100644 changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml delete mode 100644 changelogs/fragments/8497-crypt.yml delete mode 100644 changelogs/fragments/8508-virtualbox-inventory.yml delete mode 100644 changelogs/fragments/8512-as-bool-not.yml delete mode 100644 changelogs/fragments/8514-pacman-empty.yml delete mode 100644 changelogs/fragments/8516-proxmox-template-refactor.yml delete mode 100644 changelogs/fragments/8517-cmd-runner-lang-auto.yml delete mode 100644 changelogs/fragments/8532-expand-opennuebula-inventory-data.yml delete mode 100644 changelogs/fragments/8533-add-ciphers-option.yml delete 
mode 100644 changelogs/fragments/8542-fix-proxmox-volume-handling.yml delete mode 100644 changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml delete mode 100644 changelogs/fragments/8557-fix-bug-with-bitwarden.yml delete mode 100644 changelogs/fragments/8613-redfish_utils-language.yaml delete mode 100644 changelogs/fragments/8614-nsupdate-index-out-of-range.yml delete mode 100644 changelogs/fragments/8623-become-types.yml delete mode 100644 changelogs/fragments/8624-cache-types.yml delete mode 100644 changelogs/fragments/8625-inventory-types.yml delete mode 100644 changelogs/fragments/8626-lookup-types.yml delete mode 100644 changelogs/fragments/8627-connection-types.yml delete mode 100644 changelogs/fragments/8628-callback-types.yml delete mode 100644 changelogs/fragments/8632-pkgng-add-option-use_globs.yml delete mode 100644 changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml delete mode 100644 changelogs/fragments/8648-fix-gitlab-runner-paused.yaml delete mode 100644 changelogs/fragments/8652-Redfish-Password-Change-Required.yml delete mode 100644 changelogs/fragments/8654-add-redis-tls-params.yml delete mode 100644 changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml delete mode 100644 changelogs/fragments/8675-pipx-install-suffix.yml delete mode 100644 changelogs/fragments/8679-fix-cloudflare-srv.yml delete mode 100644 changelogs/fragments/8682-locale-gen-multiple.yaml delete mode 100644 changelogs/fragments/8688-gitlab_project-add-new-params.yml delete mode 100644 changelogs/fragments/8689-passwordstore-lock-naming.yml delete mode 100644 changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml delete mode 100644 changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml delete mode 100644 changelogs/fragments/8711-gconftool2-refactor.yml delete mode 100644 changelogs/fragments/8713-proxmox_lxc_interfaces.yml delete mode 100644 changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml delete mode 100644 
changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml delete mode 100644 changelogs/fragments/8738-limit-packages-for-copr.yml delete mode 100644 changelogs/fragments/8741-fix-opentelemetry-callback.yml delete mode 100644 changelogs/fragments/8759-gitlab_project-sort-params.yml delete mode 100644 changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml delete mode 100644 changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml delete mode 100644 changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml delete mode 100644 changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml delete mode 100644 changelogs/fragments/8766-mh-deco-improve.yml delete mode 100644 changelogs/fragments/8776-mute-vardict-deprecation.yml delete mode 100644 changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml delete mode 100644 changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml delete mode 100644 changelogs/fragments/8791-mh-cause-changes-param-depr.yml delete mode 100644 changelogs/fragments/8793-pipx-global.yml delete mode 100644 changelogs/fragments/8794-Fixing-possible-concatination-error.yaml delete mode 100644 changelogs/fragments/8796-gitlab-access-token-check-mode.yml delete mode 100644 changelogs/fragments/8809-pipx-new-params.yml delete mode 100644 changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml delete mode 100644 changelogs/fragments/8814-dict-comprehension.yml delete mode 100644 changelogs/fragments/8822-dict-comprehension.yml delete mode 100644 changelogs/fragments/8823-keycloak-realm-key.yml delete mode 100644 changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml delete mode 100644 changelogs/fragments/8833-dict-comprehension.yml delete mode 100644 changelogs/fragments/8855-gio_mime_vardict.yml 
delete mode 100644 changelogs/fragments/8856-jira_vardict.yml delete mode 100644 changelogs/fragments/8858-dict-comprehension.yml delete mode 100644 changelogs/fragments/8876-dict-items-loop.yml delete mode 100644 changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml delete mode 100644 changelogs/fragments/8885-add-force-flag-for-nmp.yml delete mode 100644 changelogs/fragments/8887-fix-one_service-unique.yml delete mode 100644 changelogs/fragments/8889-refactor-one-image-modules.yml delete mode 100644 changelogs/fragments/8895-fix-comprehension.yaml delete mode 100644 changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml delete mode 100644 changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml delete mode 100644 changelogs/fragments/8900-ipa-hostgroup-fix-states.yml delete mode 100644 changelogs/fragments/8907-fix-one-host-id.yml delete mode 100644 changelogs/fragments/8908-add-gitlab-group-params.yml delete mode 100644 changelogs/fragments/8909-flatpak-improve-name-parsing.yaml delete mode 100644 changelogs/fragments/8917-proxmox-clean-auth.yml delete mode 100644 changelogs/fragments/8920-ipa-host-fix-state.yml delete mode 100644 changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml delete mode 100644 changelogs/fragments/8925-atomic.yml delete mode 100644 changelogs/fragments/8928-cmd-runner-10.0.0.yml delete mode 100644 changelogs/fragments/8929-cmd_runner-bugfix.yml delete mode 100644 changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml delete mode 100644 changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml delete mode 100644 changelogs/fragments/8944-django-command-fix.yml delete mode 100644 changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml delete mode 100644 changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml delete mode 100644 
changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml delete mode 100644 changelogs/fragments/8964-cmd-runner-argformat-refactor.yml delete mode 100644 changelogs/fragments/8966-dig-add-port-option.yml delete mode 100644 changelogs/fragments/8970-fix-dig-multi-nameservers.yml delete mode 100644 changelogs/fragments/8973-keycloak_client-add-x509-auth.yml delete mode 100644 changelogs/fragments/8979-keycloak_group-fix-subgroups.yml delete mode 100644 changelogs/fragments/8987-legacycrypt.yml delete mode 100644 changelogs/fragments/8989-github-app-token-from-fact.yml delete mode 100644 changelogs/fragments/8990.yml delete mode 100644 changelogs/fragments/9010-edit-gitlab-label-color.yaml delete mode 100644 changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml delete mode 100644 changelogs/fragments/9019-onevnet-bugfix.yml delete mode 100644 changelogs/fragments/9022-improve-homebrew-perf.yml delete mode 100644 changelogs/fragments/9026-consul_kv-datacenter.yml delete mode 100644 changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml delete mode 100644 changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml delete mode 100644 changelogs/fragments/9044-pipx-fixes.yml delete mode 100644 changelogs/fragments/9047-redfish-uri-parsing.yml delete mode 100644 changelogs/fragments/9052-modprobe-bugfix.yml delete mode 100644 changelogs/fragments/9056-fix-one_image-modules.yml delete mode 100644 changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml delete mode 100644 changelogs/fragments/9060-ansible-galaxy-install-version.yml delete mode 100644 changelogs/fragments/9061-cpanm-version.yml delete mode 100644 changelogs/fragments/9063-django-version.yml delete mode 100644 changelogs/fragments/9064-gconftool2-version.yml delete mode 100644 changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml delete mode 100644 changelogs/fragments/9067-gio-mime-version.yml delete mode 100644 
changelogs/fragments/9075-add-creation-oneimage.yml delete mode 100644 changelogs/fragments/9084-collection_version-importlib.yml delete mode 100644 changelogs/fragments/9084-jenkins_node-add-offline-message.yml delete mode 100644 changelogs/fragments/9086-gio-mime-version.yml delete mode 100644 changelogs/fragments/9087-mattermost-priority.yaml delete mode 100644 changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml delete mode 100644 changelogs/fragments/9099-proxmox-fix-insecure.yml delete mode 100644 changelogs/fragments/deprecate-hipchat.yml delete mode 100644 changelogs/fragments/deprecations.yml delete mode 100644 changelogs/fragments/removals.yml diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 5aa97d97e9..ab0a7be6fd 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,3 +1,3 @@ --- -ancestor: 9.0.0 +ancestor: 10.0.0 releases: {} diff --git a/changelogs/fragments/8051-Redfish-Wait-For-Service.yml b/changelogs/fragments/8051-Redfish-Wait-For-Service.yml deleted file mode 100644 index 826c40e8af..0000000000 --- a/changelogs/fragments/8051-Redfish-Wait-For-Service.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). - - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). 
diff --git a/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml b/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml deleted file mode 100644 index 411ba8e868..0000000000 --- a/changelogs/fragments/8214-sudosu-not-working-on-some-BSD-machines.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). diff --git a/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml b/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml deleted file mode 100644 index 2a4e7dfd8d..0000000000 --- a/changelogs/fragments/8402-add-diif-mode-openbsd-pkg.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402). diff --git a/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml b/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml deleted file mode 100644 index b8acf7b09b..0000000000 --- a/changelogs/fragments/8403-fix-typeerror-in-keycloak-client.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). 
diff --git a/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml b/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml deleted file mode 100644 index e989f5dbb1..0000000000 --- a/changelogs/fragments/8404-ipa_dnsrecord_sshfp.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404). diff --git a/changelogs/fragments/8405-gitlab-remove-basic-auth.yml b/changelogs/fragments/8405-gitlab-remove-basic-auth.yml deleted file mode 100644 index f8a03a3d71..0000000000 --- a/changelogs/fragments/8405-gitlab-remove-basic-auth.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405). diff --git a/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml b/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml deleted file mode 100644 index 0e3bf38ed3..0000000000 --- a/changelogs/fragments/8406-fix-homebrew-cask-warning.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044). diff --git a/changelogs/fragments/8411-locale-gen-vardict.yml b/changelogs/fragments/8411-locale-gen-vardict.yml deleted file mode 100644 index 5220731281..0000000000 --- a/changelogs/fragments/8411-locale-gen-vardict.yml +++ /dev/null @@ -1,11 +0,0 @@ -bugfixes: - - django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). 
- - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). 
diff --git a/changelogs/fragments/8413-galaxy-refactor.yml b/changelogs/fragments/8413-galaxy-refactor.yml deleted file mode 100644 index edd1601be8..0000000000 --- a/changelogs/fragments/8413-galaxy-refactor.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). diff --git a/changelogs/fragments/8415-cmd-runner-stack.yml b/changelogs/fragments/8415-cmd-runner-stack.yml deleted file mode 100644 index 555683e057..0000000000 --- a/changelogs/fragments/8415-cmd-runner-stack.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415). diff --git a/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml b/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml deleted file mode 100644 index d9bb9bc3ea..0000000000 --- a/changelogs/fragments/8428-assign-auth-flow-by-name-keycloak-client.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428). diff --git a/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml b/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml deleted file mode 100644 index 29da61c8bf..0000000000 --- a/changelogs/fragments/8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430). - - opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). 
\ No newline at end of file diff --git a/changelogs/fragments/8431-galaxy-upgrade.yml b/changelogs/fragments/8431-galaxy-upgrade.yml deleted file mode 100644 index 9be9ca93c8..0000000000 --- a/changelogs/fragments/8431-galaxy-upgrade.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351). diff --git a/changelogs/fragments/8440-allow-api-port-specification.yaml b/changelogs/fragments/8440-allow-api-port-specification.yaml deleted file mode 100644 index 646ee1ab60..0000000000 --- a/changelogs/fragments/8440-allow-api-port-specification.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441). diff --git a/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml b/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml deleted file mode 100644 index d094327240..0000000000 --- a/changelogs/fragments/8444-fix-redfish-gen2-upgrade.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444). diff --git a/changelogs/fragments/8452-git_config-absent.yml b/changelogs/fragments/8452-git_config-absent.yml deleted file mode 100644 index 11e0767713..0000000000 --- a/changelogs/fragments/8452-git_config-absent.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452)." 
diff --git a/changelogs/fragments/8453-git_config-deprecate-read.yml b/changelogs/fragments/8453-git_config-deprecate-read.yml deleted file mode 100644 index a291568fce..0000000000 --- a/changelogs/fragments/8453-git_config-deprecate-read.yml +++ /dev/null @@ -1,3 +0,0 @@ -deprecated_features: - - "git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453)." - - "git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453)." diff --git a/changelogs/fragments/8464-redis-add-cluster-info.yml b/changelogs/fragments/8464-redis-add-cluster-info.yml deleted file mode 100644 index 921307d716..0000000000 --- a/changelogs/fragments/8464-redis-add-cluster-info.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). diff --git a/changelogs/fragments/8471-proxmox-vm-info-network.yml b/changelogs/fragments/8471-proxmox-vm-info-network.yml deleted file mode 100644 index f658b78831..0000000000 --- a/changelogs/fragments/8471-proxmox-vm-info-network.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471). 
diff --git a/changelogs/fragments/8476-launchd-check-mode-changed.yaml b/changelogs/fragments/8476-launchd-check-mode-changed.yaml deleted file mode 100644 index dc1e60de36..0000000000 --- a/changelogs/fragments/8476-launchd-check-mode-changed.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - launched - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406). diff --git a/changelogs/fragments/8479-cmdrunner-improvements.yml b/changelogs/fragments/8479-cmdrunner-improvements.yml deleted file mode 100644 index 075f5f5cd6..0000000000 --- a/changelogs/fragments/8479-cmdrunner-improvements.yml +++ /dev/null @@ -1,4 +0,0 @@ -deprecated_features: - - CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). -minor_changes: - - CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). diff --git a/changelogs/fragments/8480-directory-feature-cargo.yml b/changelogs/fragments/8480-directory-feature-cargo.yml deleted file mode 100644 index 8892e7c5dd..0000000000 --- a/changelogs/fragments/8480-directory-feature-cargo.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480)." 
diff --git a/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml b/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml deleted file mode 100644 index 3db86f364e..0000000000 --- a/changelogs/fragments/8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489). diff --git a/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml b/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml deleted file mode 100644 index 8af320cae0..0000000000 --- a/changelogs/fragments/8496-keycloak_clientscope-add-normalizations.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496). diff --git a/changelogs/fragments/8497-crypt.yml b/changelogs/fragments/8497-crypt.yml deleted file mode 100644 index f77f6c20f9..0000000000 --- a/changelogs/fragments/8497-crypt.yml +++ /dev/null @@ -1,3 +0,0 @@ -known_issues: - - "homectl - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8497)." - - "udm_user - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8497)." 
diff --git a/changelogs/fragments/8508-virtualbox-inventory.yml b/changelogs/fragments/8508-virtualbox-inventory.yml deleted file mode 100644 index dd14818331..0000000000 --- a/changelogs/fragments/8508-virtualbox-inventory.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - >- - virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510). \ No newline at end of file diff --git a/changelogs/fragments/8512-as-bool-not.yml b/changelogs/fragments/8512-as-bool-not.yml deleted file mode 100644 index f579c19810..0000000000 --- a/changelogs/fragments/8512-as-bool-not.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512). diff --git a/changelogs/fragments/8514-pacman-empty.yml b/changelogs/fragments/8514-pacman-empty.yml deleted file mode 100644 index c51ba21acc..0000000000 --- a/changelogs/fragments/8514-pacman-empty.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "paman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514)." diff --git a/changelogs/fragments/8516-proxmox-template-refactor.yml b/changelogs/fragments/8516-proxmox-template-refactor.yml deleted file mode 100644 index c069985111..0000000000 --- a/changelogs/fragments/8516-proxmox-template-refactor.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516). 
diff --git a/changelogs/fragments/8517-cmd-runner-lang-auto.yml b/changelogs/fragments/8517-cmd-runner-lang-auto.yml deleted file mode 100644 index 086a74e997..0000000000 --- a/changelogs/fragments/8517-cmd-runner-lang-auto.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto`` which will automatically try and determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517). diff --git a/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml b/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml deleted file mode 100644 index a1b0ffe2c0..0000000000 --- a/changelogs/fragments/8532-expand-opennuebula-inventory-data.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532). diff --git a/changelogs/fragments/8533-add-ciphers-option.yml b/changelogs/fragments/8533-add-ciphers-option.yml deleted file mode 100644 index 7f9880ebee..0000000000 --- a/changelogs/fragments/8533-add-ciphers-option.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533). -... diff --git a/changelogs/fragments/8542-fix-proxmox-volume-handling.yml b/changelogs/fragments/8542-fix-proxmox-volume-handling.yml deleted file mode 100644 index 9b982c0aeb..0000000000 --- a/changelogs/fragments/8542-fix-proxmox-volume-handling.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542). 
-minor_changes: - - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542). - - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542). diff --git a/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml b/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml deleted file mode 100644 index 5986a45b87..0000000000 --- a/changelogs/fragments/8545-keycloak-clientscope-remove-id-on-compare.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). diff --git a/changelogs/fragments/8557-fix-bug-with-bitwarden.yml b/changelogs/fragments/8557-fix-bug-with-bitwarden.yml deleted file mode 100644 index cf41ae209f..0000000000 --- a/changelogs/fragments/8557-fix-bug-with-bitwarden.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557)." \ No newline at end of file diff --git a/changelogs/fragments/8613-redfish_utils-language.yaml b/changelogs/fragments/8613-redfish_utils-language.yaml deleted file mode 100644 index 1fc43c895d..0000000000 --- a/changelogs/fragments/8613-redfish_utils-language.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613). 
diff --git a/changelogs/fragments/8614-nsupdate-index-out-of-range.yml b/changelogs/fragments/8614-nsupdate-index-out-of-range.yml deleted file mode 100644 index 00b6f8b974..0000000000 --- a/changelogs/fragments/8614-nsupdate-index-out-of-range.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "nsupdate - fix 'index out of range' error when changing NS records by falling back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614)." diff --git a/changelogs/fragments/8623-become-types.yml b/changelogs/fragments/8623-become-types.yml deleted file mode 100644 index c38e67eca1..0000000000 --- a/changelogs/fragments/8623-become-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623)." diff --git a/changelogs/fragments/8624-cache-types.yml b/changelogs/fragments/8624-cache-types.yml deleted file mode 100644 index 8efa34b6c0..0000000000 --- a/changelogs/fragments/8624-cache-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624)." diff --git a/changelogs/fragments/8625-inventory-types.yml b/changelogs/fragments/8625-inventory-types.yml deleted file mode 100644 index a89352a230..0000000000 --- a/changelogs/fragments/8625-inventory-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625)." 
diff --git a/changelogs/fragments/8626-lookup-types.yml b/changelogs/fragments/8626-lookup-types.yml deleted file mode 100644 index b6ebf35748..0000000000 --- a/changelogs/fragments/8626-lookup-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626)." diff --git a/changelogs/fragments/8627-connection-types.yml b/changelogs/fragments/8627-connection-types.yml deleted file mode 100644 index 9b92735fb8..0000000000 --- a/changelogs/fragments/8627-connection-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627)." diff --git a/changelogs/fragments/8628-callback-types.yml b/changelogs/fragments/8628-callback-types.yml deleted file mode 100644 index c223a85985..0000000000 --- a/changelogs/fragments/8628-callback-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628)." 
diff --git a/changelogs/fragments/8632-pkgng-add-option-use_globs.yml b/changelogs/fragments/8632-pkgng-add-option-use_globs.yml deleted file mode 100644 index d3e03959d5..0000000000 --- a/changelogs/fragments/8632-pkgng-add-option-use_globs.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633). diff --git a/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml b/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml deleted file mode 100644 index b3b03a008b..0000000000 --- a/changelogs/fragments/8646-fix-bug-in-proxmox-volumes.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). - - proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). - - proxmox - fixed an issue where volume strings where overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). diff --git a/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml b/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml deleted file mode 100644 index d064725f14..0000000000 --- a/changelogs/fragments/8648-fix-gitlab-runner-paused.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648)." 
\ No newline at end of file diff --git a/changelogs/fragments/8652-Redfish-Password-Change-Required.yml b/changelogs/fragments/8652-Redfish-Password-Change-Required.yml deleted file mode 100644 index 44cfd41430..0000000000 --- a/changelogs/fragments/8652-Redfish-Password-Change-Required.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). diff --git a/changelogs/fragments/8654-add-redis-tls-params.yml b/changelogs/fragments/8654-add-redis-tls-params.yml deleted file mode 100644 index 0b549f5dd0..0000000000 --- a/changelogs/fragments/8654-add-redis-tls-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). diff --git a/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml b/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml deleted file mode 100644 index f67e11a6b0..0000000000 --- a/changelogs/fragments/8674-add-gitlab-project-cleanup-policy.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674). - - gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674). 
diff --git a/changelogs/fragments/8675-pipx-install-suffix.yml b/changelogs/fragments/8675-pipx-install-suffix.yml deleted file mode 100644 index 4b5a9a99bc..0000000000 --- a/changelogs/fragments/8675-pipx-install-suffix.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656). diff --git a/changelogs/fragments/8679-fix-cloudflare-srv.yml b/changelogs/fragments/8679-fix-cloudflare-srv.yml deleted file mode 100644 index bf00fc1305..0000000000 --- a/changelogs/fragments/8679-fix-cloudflare-srv.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948). diff --git a/changelogs/fragments/8682-locale-gen-multiple.yaml b/changelogs/fragments/8682-locale-gen-multiple.yaml deleted file mode 100644 index 139f372353..0000000000 --- a/changelogs/fragments/8682-locale-gen-multiple.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682). diff --git a/changelogs/fragments/8688-gitlab_project-add-new-params.yml b/changelogs/fragments/8688-gitlab_project-add-new-params.yml deleted file mode 100644 index 0c6b8e505a..0000000000 --- a/changelogs/fragments/8688-gitlab_project-add-new-params.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688). - - gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688). 
- - gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688). diff --git a/changelogs/fragments/8689-passwordstore-lock-naming.yml b/changelogs/fragments/8689-passwordstore-lock-naming.yml deleted file mode 100644 index c5c9a82d78..0000000000 --- a/changelogs/fragments/8689-passwordstore-lock-naming.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689). diff --git a/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml b/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml deleted file mode 100644 index b518d59e36..0000000000 --- a/changelogs/fragments/8695-keycloak_user_federation-mapper-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). \ No newline at end of file diff --git a/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml b/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml deleted file mode 100644 index 6a0cd74302..0000000000 --- a/changelogs/fragments/8708-homebrew_cask-fix-upgrade-all.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). 
\ No newline at end of file diff --git a/changelogs/fragments/8711-gconftool2-refactor.yml b/changelogs/fragments/8711-gconftool2-refactor.yml deleted file mode 100644 index ae214d95ec..0000000000 --- a/changelogs/fragments/8711-gconftool2-refactor.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711). diff --git a/changelogs/fragments/8713-proxmox_lxc_interfaces.yml b/changelogs/fragments/8713-proxmox_lxc_interfaces.yml deleted file mode 100644 index 32c475157e..0000000000 --- a/changelogs/fragments/8713-proxmox_lxc_interfaces.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). \ No newline at end of file diff --git a/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml b/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml deleted file mode 100644 index 16e523d83d..0000000000 --- a/changelogs/fragments/8719-openiscsi-add-multiple-targets.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719). 
diff --git a/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml b/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml deleted file mode 100644 index ed3806bd5f..0000000000 --- a/changelogs/fragments/8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735). \ No newline at end of file diff --git a/changelogs/fragments/8738-limit-packages-for-copr.yml b/changelogs/fragments/8738-limit-packages-for-copr.yml deleted file mode 100644 index 0e49cc5cd9..0000000000 --- a/changelogs/fragments/8738-limit-packages-for-copr.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository(https://github.com/ansible-collections/community.general/pull/8779). \ No newline at end of file diff --git a/changelogs/fragments/8741-fix-opentelemetry-callback.yml b/changelogs/fragments/8741-fix-opentelemetry-callback.yml deleted file mode 100644 index 1b5e63a89f..0000000000 --- a/changelogs/fragments/8741-fix-opentelemetry-callback.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741). 
diff --git a/changelogs/fragments/8759-gitlab_project-sort-params.yml b/changelogs/fragments/8759-gitlab_project-sort-params.yml deleted file mode 100644 index 2ff2ed18a7..0000000000 --- a/changelogs/fragments/8759-gitlab_project-sort-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759). diff --git a/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml b/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml deleted file mode 100644 index 1a77b2f0d4..0000000000 --- a/changelogs/fragments/8760-gitlab_project-add-issues-access-level.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760). diff --git a/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml b/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml deleted file mode 100644 index 2d7d39345f..0000000000 --- a/changelogs/fragments/8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - sort desired and after mapper list by name (analog to before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761). 
\ No newline at end of file diff --git a/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml b/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml deleted file mode 100644 index 08da8ae21a..0000000000 --- a/changelogs/fragments/8762-keycloac_user_federation-fix-key-error-when-updating.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - fix key error when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). \ No newline at end of file diff --git a/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml b/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml deleted file mode 100644 index c457012751..0000000000 --- a/changelogs/fragments/8764-keycloak_user_federation-make-mapper-removal-optout.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - add module argument allowing users to optout of the removal of unspecified mappers, for example to keep the keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764). \ No newline at end of file diff --git a/changelogs/fragments/8766-mh-deco-improve.yml b/changelogs/fragments/8766-mh-deco-improve.yml deleted file mode 100644 index 7bf104d2cc..0000000000 --- a/changelogs/fragments/8766-mh-deco-improve.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - MH module utils - add parameter ``when`` to ``cause_changes`` decorator (https://github.com/ansible-collections/community.general/pull/8766). - - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). 
diff --git a/changelogs/fragments/8776-mute-vardict-deprecation.yml b/changelogs/fragments/8776-mute-vardict-deprecation.yml deleted file mode 100644 index a74e40e923..0000000000 --- a/changelogs/fragments/8776-mute-vardict-deprecation.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). - - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). diff --git a/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml b/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml deleted file mode 100644 index c8a6ff752a..0000000000 --- a/changelogs/fragments/8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). \ No newline at end of file diff --git a/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml b/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml deleted file mode 100644 index ba171a1178..0000000000 --- a/changelogs/fragments/8790-gitlab_project-fix-cleanup-policy-on-project-create.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790). - - gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790). 
diff --git a/changelogs/fragments/8791-mh-cause-changes-param-depr.yml b/changelogs/fragments/8791-mh-cause-changes-param-depr.yml deleted file mode 100644 index 7f7935af14..0000000000 --- a/changelogs/fragments/8791-mh-cause-changes-param-depr.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791). -deprecated_features: - - MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). diff --git a/changelogs/fragments/8793-pipx-global.yml b/changelogs/fragments/8793-pipx-global.yml deleted file mode 100644 index c3d7f5157f..0000000000 --- a/changelogs/fragments/8793-pipx-global.yml +++ /dev/null @@ -1,12 +0,0 @@ -minor_changes: - - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). - - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). -deprecated_features: - - > - pipx - - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 - (https://github.com/ansible-collections/community.general/pull/8793). - - > - pipx_info - - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 - (https://github.com/ansible-collections/community.general/pull/8793). 
diff --git a/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml b/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml deleted file mode 100644 index a94eace415..0000000000 --- a/changelogs/fragments/8794-Fixing-possible-concatination-error.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). diff --git a/changelogs/fragments/8796-gitlab-access-token-check-mode.yml b/changelogs/fragments/8796-gitlab-access-token-check-mode.yml deleted file mode 100644 index 6585584fac..0000000000 --- a/changelogs/fragments/8796-gitlab-access-token-check-mode.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). - - gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). diff --git a/changelogs/fragments/8809-pipx-new-params.yml b/changelogs/fragments/8809-pipx-new-params.yml deleted file mode 100644 index 775163e987..0000000000 --- a/changelogs/fragments/8809-pipx-new-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809). 
diff --git a/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml b/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml deleted file mode 100644 index 82496d1083..0000000000 --- a/changelogs/fragments/8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). \ No newline at end of file diff --git a/changelogs/fragments/8814-dict-comprehension.yml b/changelogs/fragments/8814-dict-comprehension.yml deleted file mode 100644 index 01b5da4bae..0000000000 --- a/changelogs/fragments/8814-dict-comprehension.yml +++ /dev/null @@ -1,23 +0,0 @@ -minor_changes: - - hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
- - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
- - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). diff --git a/changelogs/fragments/8822-dict-comprehension.yml b/changelogs/fragments/8822-dict-comprehension.yml deleted file mode 100644 index cefb673bb8..0000000000 --- a/changelogs/fragments/8822-dict-comprehension.yml +++ /dev/null @@ -1,21 +0,0 @@ -minor_changes: - - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). 
- - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). 
diff --git a/changelogs/fragments/8823-keycloak-realm-key.yml b/changelogs/fragments/8823-keycloak-realm-key.yml deleted file mode 100644 index 4c0e591f8e..0000000000 --- a/changelogs/fragments/8823-keycloak-realm-key.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823). \ No newline at end of file diff --git a/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml b/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml deleted file mode 100644 index 63ac352057..0000000000 --- a/changelogs/fragments/8831-fix-error-when-mapper-id-is-provided.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831). \ No newline at end of file diff --git a/changelogs/fragments/8833-dict-comprehension.yml b/changelogs/fragments/8833-dict-comprehension.yml deleted file mode 100644 index 1515609e69..0000000000 --- a/changelogs/fragments/8833-dict-comprehension.yml +++ /dev/null @@ -1,23 +0,0 @@ -minor_changes: - - redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
- - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
- - scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). diff --git a/changelogs/fragments/8855-gio_mime_vardict.yml b/changelogs/fragments/8855-gio_mime_vardict.yml deleted file mode 100644 index 54efa08579..0000000000 --- a/changelogs/fragments/8855-gio_mime_vardict.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gio_mime - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855). diff --git a/changelogs/fragments/8856-jira_vardict.yml b/changelogs/fragments/8856-jira_vardict.yml deleted file mode 100644 index c4d8357419..0000000000 --- a/changelogs/fragments/8856-jira_vardict.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856). diff --git a/changelogs/fragments/8858-dict-comprehension.yml b/changelogs/fragments/8858-dict-comprehension.yml deleted file mode 100644 index 47b4acb329..0000000000 --- a/changelogs/fragments/8858-dict-comprehension.yml +++ /dev/null @@ -1,11 +0,0 @@ -minor_changes: - - scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). 
- - scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). diff --git a/changelogs/fragments/8876-dict-items-loop.yml b/changelogs/fragments/8876-dict-items-loop.yml deleted file mode 100644 index 6bd170c7b2..0000000000 --- a/changelogs/fragments/8876-dict-items-loop.yml +++ /dev/null @@ -1,16 +0,0 @@ -minor_changes: - - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). 
- - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876). - - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). 
diff --git a/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml b/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml deleted file mode 100644 index 3e19866289..0000000000 --- a/changelogs/fragments/8877-keycloak_realm-sort-lists-before-change-detection.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). \ No newline at end of file diff --git a/changelogs/fragments/8885-add-force-flag-for-nmp.yml b/changelogs/fragments/8885-add-force-flag-for-nmp.yml deleted file mode 100644 index 40eaeff74b..0000000000 --- a/changelogs/fragments/8885-add-force-flag-for-nmp.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). diff --git a/changelogs/fragments/8887-fix-one_service-unique.yml b/changelogs/fragments/8887-fix-one_service-unique.yml deleted file mode 100644 index 979460b862..0000000000 --- a/changelogs/fragments/8887-fix-one_service-unique.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - one_service - fix service creation after it was deleted with ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887). diff --git a/changelogs/fragments/8889-refactor-one-image-modules.yml b/changelogs/fragments/8889-refactor-one-image-modules.yml deleted file mode 100644 index de552c17a6..0000000000 --- a/changelogs/fragments/8889-refactor-one-image-modules.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: - - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889). 
- - one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). - - one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). - - one_image - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889). - - one_image_info - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889). diff --git a/changelogs/fragments/8895-fix-comprehension.yaml b/changelogs/fragments/8895-fix-comprehension.yaml deleted file mode 100644 index aecd0fd83e..0000000000 --- a/changelogs/fragments/8895-fix-comprehension.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888). diff --git a/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml b/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml deleted file mode 100644 index 68f481452c..0000000000 --- a/changelogs/fragments/8897-nmcli-add-reload-and-up-down.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897). 
- - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). diff --git a/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml b/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml deleted file mode 100644 index 8f86d510f9..0000000000 --- a/changelogs/fragments/8898-add-arg-to-exclude-bind-credential-from-change-check.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). \ No newline at end of file diff --git a/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml b/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml deleted file mode 100644 index c7347e879f..0000000000 --- a/changelogs/fragments/8900-ipa-hostgroup-fix-states.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900). diff --git a/changelogs/fragments/8907-fix-one-host-id.yml b/changelogs/fragments/8907-fix-one-host-id.yml deleted file mode 100644 index 78fc4080b1..0000000000 --- a/changelogs/fragments/8907-fix-one-host-id.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907). 
diff --git a/changelogs/fragments/8908-add-gitlab-group-params.yml b/changelogs/fragments/8908-add-gitlab-group-params.yml deleted file mode 100644 index 12de77b43a..0000000000 --- a/changelogs/fragments/8908-add-gitlab-group-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908). diff --git a/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml b/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml deleted file mode 100644 index 26a9379235..0000000000 --- a/changelogs/fragments/8909-flatpak-improve-name-parsing.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909). diff --git a/changelogs/fragments/8917-proxmox-clean-auth.yml b/changelogs/fragments/8917-proxmox-clean-auth.yml deleted file mode 100644 index 0681f326a6..0000000000 --- a/changelogs/fragments/8917-proxmox-clean-auth.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). diff --git a/changelogs/fragments/8920-ipa-host-fix-state.yml b/changelogs/fragments/8920-ipa-host-fix-state.yml deleted file mode 100644 index 0f3df64b6a..0000000000 --- a/changelogs/fragments/8920-ipa-host-fix-state.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). 
diff --git a/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml b/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml deleted file mode 100644 index 5b3c18ba2c..0000000000 --- a/changelogs/fragments/8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). \ No newline at end of file diff --git a/changelogs/fragments/8925-atomic.yml b/changelogs/fragments/8925-atomic.yml deleted file mode 100644 index 75e48a1dba..0000000000 --- a/changelogs/fragments/8925-atomic.yml +++ /dev/null @@ -1,6 +0,0 @@ -bugfixes: - - "ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." - - "java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." - - "jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." - - "kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." - - "pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925)." 
diff --git a/changelogs/fragments/8928-cmd-runner-10.0.0.yml b/changelogs/fragments/8928-cmd-runner-10.0.0.yml deleted file mode 100644 index bbeb838439..0000000000 --- a/changelogs/fragments/8928-cmd-runner-10.0.0.yml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: - - cmd_runner module utils - CLI arguments created directly from module parameters are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928). diff --git a/changelogs/fragments/8929-cmd_runner-bugfix.yml b/changelogs/fragments/8929-cmd_runner-bugfix.yml deleted file mode 100644 index 2d8e0170f6..0000000000 --- a/changelogs/fragments/8929-cmd_runner-bugfix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing parameter (https://github.com/ansible-collections/community.general/pull/8929). diff --git a/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml b/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml deleted file mode 100644 index 6b66918234..0000000000 --- a/changelogs/fragments/8937-add-StorageId-RedfishURI-to-disk-facts.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). \ No newline at end of file diff --git a/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml b/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml deleted file mode 100644 index 93f57cd86a..0000000000 --- a/changelogs/fragments/8940-keycloak_userprofile-improve-diff.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940). 
\ No newline at end of file diff --git a/changelogs/fragments/8944-django-command-fix.yml b/changelogs/fragments/8944-django-command-fix.yml deleted file mode 100644 index 755bf5628a..0000000000 --- a/changelogs/fragments/8944-django-command-fix.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - python_runner module utils - parameter ``path_prefix`` was being handled as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). - - django_command - option ``command`` is now split lexically before passed to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944). diff --git a/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml b/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml deleted file mode 100644 index 73bf1710e7..0000000000 --- a/changelogs/fragments/8952-password-store-lookup-create-subkey-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952). \ No newline at end of file diff --git a/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml b/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml deleted file mode 100644 index cd8347faf0..0000000000 --- a/changelogs/fragments/8954-keycloak-user-federation-add-referral-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments (https://github.com/ansible-collections/community.general/pull/8954). 
\ No newline at end of file diff --git a/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml b/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml deleted file mode 100644 index d6879ccb06..0000000000 --- a/changelogs/fragments/8956-remove-capacitybytes-from-the-required-parameters_list.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_confg - remove ``CapacityBytes`` from required paramaters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). diff --git a/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml b/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml deleted file mode 100644 index be8adf25e3..0000000000 --- a/changelogs/fragments/8964-cmd-runner-argformat-refactor.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd_runner module utils - refactor argument formatting code to its own Python module (https://github.com/ansible-collections/community.general/pull/8964). diff --git a/changelogs/fragments/8966-dig-add-port-option.yml b/changelogs/fragments/8966-dig-add-port-option.yml deleted file mode 100644 index e92f355dd5..0000000000 --- a/changelogs/fragments/8966-dig-add-port-option.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966). -... diff --git a/changelogs/fragments/8970-fix-dig-multi-nameservers.yml b/changelogs/fragments/8970-fix-dig-multi-nameservers.yml deleted file mode 100644 index e7f93853e9..0000000000 --- a/changelogs/fragments/8970-fix-dig-multi-nameservers.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). 
\ No newline at end of file diff --git a/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml b/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml deleted file mode 100644 index a7bc125f82..0000000000 --- a/changelogs/fragments/8973-keycloak_client-add-x509-auth.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973). diff --git a/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml b/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml deleted file mode 100644 index c64a09add6..0000000000 --- a/changelogs/fragments/8979-keycloak_group-fix-subgroups.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_group - fix crash caused in subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979). diff --git a/changelogs/fragments/8987-legacycrypt.yml b/changelogs/fragments/8987-legacycrypt.yml deleted file mode 100644 index ce955f3564..0000000000 --- a/changelogs/fragments/8987-legacycrypt.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987)." - - "udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987)." 
diff --git a/changelogs/fragments/8989-github-app-token-from-fact.yml b/changelogs/fragments/8989-github-app-token-from-fact.yml deleted file mode 100644 index 6b36d95a62..0000000000 --- a/changelogs/fragments/8989-github-app-token-from-fact.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - github_app_access_token lookup plugin - adds new ``private_key`` parameter (https://github.com/ansible-collections/community.general/pull/8989). diff --git a/changelogs/fragments/8990.yml b/changelogs/fragments/8990.yml deleted file mode 100644 index 716fd3c983..0000000000 --- a/changelogs/fragments/8990.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - redfish_config - add parameter ``storage_none_volume_deletion`` to - ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). \ No newline at end of file diff --git a/changelogs/fragments/9010-edit-gitlab-label-color.yaml b/changelogs/fragments/9010-edit-gitlab-label-color.yaml deleted file mode 100644 index 0959e57772..0000000000 --- a/changelogs/fragments/9010-edit-gitlab-label-color.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010). diff --git a/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml b/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml deleted file mode 100644 index 131ee68c7c..0000000000 --- a/changelogs/fragments/9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - redfish_utils module utils - schedule a BIOS configuration job at next - reboot when the BIOS config is changed - (https://github.com/ansible-collections/community.general/pull/9012). 
diff --git a/changelogs/fragments/9019-onevnet-bugfix.yml b/changelogs/fragments/9019-onevnet-bugfix.yml deleted file mode 100644 index 3da3ea0399..0000000000 --- a/changelogs/fragments/9019-onevnet-bugfix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019). diff --git a/changelogs/fragments/9022-improve-homebrew-perf.yml b/changelogs/fragments/9022-improve-homebrew-perf.yml deleted file mode 100644 index 077b5caefc..0000000000 --- a/changelogs/fragments/9022-improve-homebrew-perf.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022). diff --git a/changelogs/fragments/9026-consul_kv-datacenter.yml b/changelogs/fragments/9026-consul_kv-datacenter.yml deleted file mode 100644 index 73ddd69266..0000000000 --- a/changelogs/fragments/9026-consul_kv-datacenter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026). diff --git a/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml b/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml deleted file mode 100644 index 7866cc53b8..0000000000 --- a/changelogs/fragments/9027-support-organizations-in-keycloak-realm.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_realm - add boolean toggle to configure organization support for a given keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, https://github.com/ansible-collections/community.general/pull/8927/). 
diff --git a/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml b/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml deleted file mode 100644 index d542692f45..0000000000 --- a/changelogs/fragments/9028-bitwarden-secrets-manager-syntax-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028)." \ No newline at end of file diff --git a/changelogs/fragments/9044-pipx-fixes.yml b/changelogs/fragments/9044-pipx-fixes.yml deleted file mode 100644 index dbf0e3c10d..0000000000 --- a/changelogs/fragments/9044-pipx-fixes.yml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). - - pipx_info - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). - - pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044). -bugfixes: - - pipx module utils - add missing command line formatter for argument ``spec_metadata`` (https://github.com/ansible-collections/community.general/pull/9044). - - pipx - it was ignoring ``global`` when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044). diff --git a/changelogs/fragments/9047-redfish-uri-parsing.yml b/changelogs/fragments/9047-redfish-uri-parsing.yml deleted file mode 100644 index 83c9450f44..0000000000 --- a/changelogs/fragments/9047-redfish-uri-parsing.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_utils module utils - fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057). 
diff --git a/changelogs/fragments/9052-modprobe-bugfix.yml b/changelogs/fragments/9052-modprobe-bugfix.yml deleted file mode 100644 index b9519e9055..0000000000 --- a/changelogs/fragments/9052-modprobe-bugfix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052). diff --git a/changelogs/fragments/9056-fix-one_image-modules.yml b/changelogs/fragments/9056-fix-one_image-modules.yml deleted file mode 100644 index 31b85904fa..0000000000 --- a/changelogs/fragments/9056-fix-one_image-modules.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). - - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). diff --git a/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml b/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml deleted file mode 100644 index 066a84e1e9..0000000000 --- a/changelogs/fragments/9059-redfish_command-updateuseraccounttypes.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058, https://github.com/ansible-collections/community.general/pull/9059). diff --git a/changelogs/fragments/9060-ansible-galaxy-install-version.yml b/changelogs/fragments/9060-ansible-galaxy-install-version.yml deleted file mode 100644 index 87d5137ad2..0000000000 --- a/changelogs/fragments/9060-ansible-galaxy-install-version.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060). 
diff --git a/changelogs/fragments/9061-cpanm-version.yml b/changelogs/fragments/9061-cpanm-version.yml deleted file mode 100644 index af91cac1c0..0000000000 --- a/changelogs/fragments/9061-cpanm-version.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061). diff --git a/changelogs/fragments/9063-django-version.yml b/changelogs/fragments/9063-django-version.yml deleted file mode 100644 index 3d0287a756..0000000000 --- a/changelogs/fragments/9063-django-version.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063). - - django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). - - django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). - - django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). diff --git a/changelogs/fragments/9064-gconftool2-version.yml b/changelogs/fragments/9064-gconftool2-version.yml deleted file mode 100644 index 7913c76a81..0000000000 --- a/changelogs/fragments/9064-gconftool2-version.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - gcontool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064). - - gcontool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). - - gcontool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). 
diff --git a/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml b/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml deleted file mode 100644 index 91e9127b70..0000000000 --- a/changelogs/fragments/9066-proxmox-kvm-ciupgrade.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066). diff --git a/changelogs/fragments/9067-gio-mime-version.yml b/changelogs/fragments/9067-gio-mime-version.yml deleted file mode 100644 index 9e2fb76082..0000000000 --- a/changelogs/fragments/9067-gio-mime-version.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067). - - gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067). diff --git a/changelogs/fragments/9075-add-creation-oneimage.yml b/changelogs/fragments/9075-add-creation-oneimage.yml deleted file mode 100644 index 96420d24ef..0000000000 --- a/changelogs/fragments/9075-add-creation-oneimage.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - one_image - add ``create``, ``template`` and ``datastore_id`` arguments for image creation (https://github.com/ansible-collections/community.general/pull/9075). - - one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075). 
diff --git a/changelogs/fragments/9084-collection_version-importlib.yml b/changelogs/fragments/9084-collection_version-importlib.yml deleted file mode 100644 index 827b9653d2..0000000000 --- a/changelogs/fragments/9084-collection_version-importlib.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "collection_version lookup plugin - use ``importlib`` directly instead of the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib`` (https://github.com/ansible-collections/community.general/pull/9084)." diff --git a/changelogs/fragments/9084-jenkins_node-add-offline-message.yml b/changelogs/fragments/9084-jenkins_node-add-offline-message.yml deleted file mode 100644 index 3718127513..0000000000 --- a/changelogs/fragments/9084-jenkins_node-add-offline-message.yml +++ /dev/null @@ -1,8 +0,0 @@ -minor_changes: - - jenkins_node - add ``offline_message`` parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084)." - -bugfixes: - - jenkins_node - fixed ``enabled``, ``disable`` and ``absent`` node state redirect authorization issues, same as was present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084). - -known_issues: - - jenkins_node - the module is not able to update offline message when node is already offline due to internally using toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084). diff --git a/changelogs/fragments/9086-gio-mime-version.yml b/changelogs/fragments/9086-gio-mime-version.yml deleted file mode 100644 index 46c3e6cec8..0000000000 --- a/changelogs/fragments/9086-gio-mime-version.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086). 
diff --git a/changelogs/fragments/9087-mattermost-priority.yaml b/changelogs/fragments/9087-mattermost-priority.yaml deleted file mode 100644 index f66d4189cc..0000000000 --- a/changelogs/fragments/9087-mattermost-priority.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, https://github.com/ansible-collections/community.general/pull/9087). diff --git a/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml b/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml deleted file mode 100644 index b51eb24136..0000000000 --- a/changelogs/fragments/9092-keycloak-clientscope-type-fix-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093). diff --git a/changelogs/fragments/9099-proxmox-fix-insecure.yml b/changelogs/fragments/9099-proxmox-fix-insecure.yml deleted file mode 100644 index b277a0f933..0000000000 --- a/changelogs/fragments/9099-proxmox-fix-insecure.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). diff --git a/changelogs/fragments/deprecate-hipchat.yml b/changelogs/fragments/deprecate-hipchat.yml deleted file mode 100644 index 256991ce3b..0000000000 --- a/changelogs/fragments/deprecate-hipchat.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - "hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. 
The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919)." diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml deleted file mode 100644 index c8f4f6150a..0000000000 --- a/changelogs/fragments/deprecations.yml +++ /dev/null @@ -1,8 +0,0 @@ -removed_features: - - "redhat_subscriptions - removed the ``pool`` option. Use ``pool_ids`` instead (https://github.com/ansible-collections/community.general/pull/8918)." - - "proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly specify the old default values if you were using ``proxmox_default_behavior=compatibility``, otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918)." - - "ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918)." - - "consul - removed the ``ack_params_state_absent`` option. It had no effect anymore (https://github.com/ansible-collections/community.general/pull/8918)." -breaking_changes: - - "irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/8918)." - - "rhsm_repository - the states ``present`` and ``absent`` have been removed. Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918)." diff --git a/changelogs/fragments/removals.yml b/changelogs/fragments/removals.yml deleted file mode 100644 index 1a1f137194..0000000000 --- a/changelogs/fragments/removals.yml +++ /dev/null @@ -1,10 +0,0 @@ -removed_features: - - "The hipchat callback plugin has been removed. The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020 (https://github.com/ansible-collections/community.general/pull/8921)." 
- - "The consul_acl module has been removed. Use community.general.consul_token and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921)." - - "The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921)." - - "The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921)." - - "The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921)." -breaking_changes: - - The collection no longer supports ansible-core 2.13 and ansible-core 2.14. - While most (or even all) modules and plugins might still work with these versions, they are no longer tested in CI and breakages regarding them will not be fixed - (https://github.com/ansible-collections/community.general/pull/8921)." diff --git a/galaxy.yml b/galaxy.yml index 3af5356d06..4daf0e0ac0 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 10.0.0 +version: 10.1.0 readme: README.md authors: - Ansible (https://github.com/ansible) From e13d6de2508cc0601d1b7dbb4b51a3b7d50c3c20 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 4 Nov 2024 19:23:07 +0100 Subject: [PATCH 324/482] Adjust nightly CI schedules. 
--- .azure-pipelines/azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 97f76b3ba9..fe8624872a 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -29,14 +29,14 @@ schedules: always: true branches: include: + - stable-10 - stable-9 - - stable-8 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-7 + - stable-8 variables: - name: checkoutPath From 04c2ad18da28677590643176e3456f0e7be6ea1d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 8 Nov 2024 23:05:35 +0100 Subject: [PATCH 325/482] Add FreeBSD 13.4 to CI (#9109) * Add FreeBSD 13.4 to CI. * iso_extract won't work. * pkgng: jail won't work either. --- .azure-pipelines/azure-pipelines.yml | 2 ++ tests/integration/targets/iso_extract/aliases | 1 + tests/integration/targets/pkgng/tasks/freebsd.yml | 5 ++++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index fe8624872a..362b5d59dd 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -196,6 +196,8 @@ stages: test: rhel/9.4 - name: FreeBSD 14.1 test: freebsd/14.1 + - name: FreeBSD 13.4 + test: freebsd/13.4 groups: - 1 - 2 diff --git a/tests/integration/targets/iso_extract/aliases b/tests/integration/targets/iso_extract/aliases index 27e07941a5..0fee9ee2e1 100644 --- a/tests/integration/targets/iso_extract/aliases +++ b/tests/integration/targets/iso_extract/aliases @@ -15,5 +15,6 @@ skip/rhel9.4 # FIXME skip/freebsd12.4 # FIXME skip/freebsd13.2 # FIXME skip/freebsd13.3 # FIXME +skip/freebsd13.4 # FIXME skip/freebsd14.0 # FIXME skip/freebsd14.1 # FIXME diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml index e69d26c20d..612e7c4d42 100644 --- 
a/tests/integration/targets/pkgng/tasks/freebsd.yml +++ b/tests/integration/targets/pkgng/tasks/freebsd.yml @@ -518,6 +518,9 @@ # NOTE: FreeBSD 13.3 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 13.4 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # # NOTE: FreeBSD 14.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # @@ -528,7 +531,7 @@ # https://github.com/ansible-collections/community.general/issues/5795 when: >- (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<')) - or (ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14.0', '<')) + or (ansible_distribution_version is version('13.5', '>=') and ansible_distribution_version is version('14.0', '<')) or ansible_distribution_version is version('14.2', '>=') block: - name: Setup testjail From 195ae4afdee6f58b1afe8aca382ccba998e64c0f Mon Sep 17 00:00:00 2001 From: alexander <79072457+abakanovskii@users.noreply.github.com> Date: Sat, 9 Nov 2024 01:07:58 +0300 Subject: [PATCH 326/482] ipa_getkeytab: Fix example task (#9104) Fix ipa_getkeytab example task --- plugins/modules/ipa_getkeytab.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py index 3d4f81d5b1..643e18cf62 100644 --- a/plugins/modules/ipa_getkeytab.py +++ b/plugins/modules/ipa_getkeytab.py @@ -98,11 +98,9 @@ extends_documentation_fragment: ''' EXAMPLES = r''' -- name: Get kerberos ticket - ansible.builtin.shell: kinit admin - args: - stdin: "{{ aldpro_admin_password }}" - changed_when: true +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: "{{ aldpro_admin_password }}" - name: Create keytab 
community.general.ipa_getkeytab: From 62cb6087b59064798a51fb90958300827343fdce Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 11 Nov 2024 20:01:47 +0100 Subject: [PATCH 327/482] keycloak_client: remove code that turns attributes dict into list (#9077) * remove code that turns attributes dict into list * add changelog fragment * Update changelogs/fragments/9077-keycloak_client-fix-attributes-dict-turned-into-list.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...oak_client-fix-attributes-dict-turned-into-list.yml | 2 ++ plugins/modules/keycloak_client.py | 10 ---------- 2 files changed, 2 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/9077-keycloak_client-fix-attributes-dict-turned-into-list.yml diff --git a/changelogs/fragments/9077-keycloak_client-fix-attributes-dict-turned-into-list.yml b/changelogs/fragments/9077-keycloak_client-fix-attributes-dict-turned-into-list.yml new file mode 100644 index 0000000000..d693c2e139 --- /dev/null +++ b/changelogs/fragments/9077-keycloak_client-fix-attributes-dict-turned-into-list.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_client - fix diff by removing code that turns the attributes dict which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index d2800be292..62015bc79f 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -805,9 +805,6 @@ def normalise_cr(clientrep, remove_ids=False): # Avoid the dict passed in to be modified clientrep = clientrep.copy() - if 'attributes' in clientrep: - clientrep['attributes'] = list(sorted(clientrep['attributes'])) - if 'defaultClientScopes' in clientrep: clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes'])) @@ -1024,13 +1021,6 @@ def main(): for client_param in client_params: new_param_value = module.params.get(client_param) - # some lists in the Keycloak API are sorted, some are not. - if isinstance(new_param_value, list): - if client_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if client_param == 'protocol_mappers': From d27d86ecb108c391aa7c8153e9944a8369f8d065 Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 11 Nov 2024 20:02:41 +0100 Subject: [PATCH 328/482] keycloak_clientscope: remove code turning attributes dict into list (#9082) * remove code turning attributes dict into list * add changelog fragment --- ...lientscope-fix-attributes-dict-turned-into-list.yml | 2 ++ plugins/modules/keycloak_clientscope.py | 10 ---------- 2 files changed, 2 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml diff --git a/changelogs/fragments/9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml b/changelogs/fragments/9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml new file mode 100644 index 0000000000..c9d61780b2 --- /dev/null +++ 
b/changelogs/fragments/9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientscope - fix diff and ``end_state`` by removing the code that turns the attributes dict, which contains additional config items, into a list (https://github.com/ansible-collections/community.general/pull/9082). \ No newline at end of file diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index 576a831bdb..35ac3d9500 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -317,9 +317,6 @@ def normalise_cr(clientscoperep, remove_ids=False): # Avoid the dict passed in to be modified clientscoperep = clientscoperep.copy() - if 'attributes' in clientscoperep: - clientscoperep['attributes'] = list(sorted(clientscoperep['attributes'])) - if 'protocolMappers' in clientscoperep: clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) for mapper in clientscoperep['protocolMappers']: @@ -418,13 +415,6 @@ def main(): for clientscope_param in clientscope_params: new_param_value = module.params.get(clientscope_param) - # some lists in the Keycloak API are sorted, some are not. 
- if isinstance(new_param_value, list): - if clientscope_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if clientscope_param == 'protocol_mappers': From bafb8aca292dfb365a6386cc52e4de40f8e6bfc6 Mon Sep 17 00:00:00 2001 From: Tan Siewert Date: Mon, 11 Nov 2024 11:04:14 -0800 Subject: [PATCH 329/482] redfish_utils: remove undocumented default applytime (#9114) * redfish_utils: remove undocumented default applytime The `@Redfish.OperationApplyTime` parameter is optional as per Redfish spec version 1.21.0, paragraph 7.11 [1]. Some systems reject the request rather than ignore it, causing failures that can not be workarounded. Removing this default resolves compatibility issues. [1] https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html Signed-off-by: Tan Siewert * redfish_utils: fix changelog fragment to bugfix Signed-off-by: Tan Siewert --------- Signed-off-by: Tan Siewert --- .../9114-redfish-utils-update-remove-default-applytime.yml | 2 ++ plugins/module_utils/redfish_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9114-redfish-utils-update-remove-default-applytime.yml diff --git a/changelogs/fragments/9114-redfish-utils-update-remove-default-applytime.yml b/changelogs/fragments/9114-redfish-utils-update-remove-default-applytime.yml new file mode 100644 index 0000000000..672545a0a8 --- /dev/null +++ b/changelogs/fragments/9114-redfish-utils-update-remove-default-applytime.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index f795eac6cd..9f638c51f4 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1958,7 +1958,7 @@ class RedfishUtils(object): update_uri = data['MultipartHttpPushUri'] # Assemble the JSON payload portion of the request - payload = {"@Redfish.OperationApplyTime": "Immediate"} + payload = {} if targets: payload["Targets"] = targets if apply_time: From 9596995ffc2972a8edf72e6532d04eb0d83b97c2 Mon Sep 17 00:00:00 2001 From: dronenb Date: Sat, 16 Nov 2024 10:31:45 -0700 Subject: [PATCH 330/482] homebrew_cask: add + to valid cask chars (#9128) * fix(homebrew_cask): add + to valid cask chars * docs(homebrew_cask): add changelog fragment Signed-off-by: Ben Dronen * fix(homebrew_cask): add PR link to changelog fragment Signed-off-by: Ben Dronen * fix: add period to end of changelog fragment Signed-off-by: Ben Dronen * fix: remove blank line from changelog fragment Signed-off-by: Ben Dronen * fix: changelog fragment formatting Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Update changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml Co-authored-by: Felix Fontein --------- Signed-off-by: Ben Dronen Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml | 2 ++ plugins/modules/homebrew_cask.py | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml diff --git a/changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml b/changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml new file mode 100644 index 0000000000..69765958fb --- /dev/null +++ b/changelogs/fragments/9128-homebrew_cask-name-regex-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex 
(https://github.com/ansible-collections/community.general/pull/9128). diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index 9902cb1373..83901b4dbe 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -190,6 +190,7 @@ class HomebrewCask(object): / # slash (for taps) \- # dashes @ # at symbol + \+ # plus symbol ''' INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) From 1f786a6171c1fc357e50fed2c9d725b51350ec1d Mon Sep 17 00:00:00 2001 From: Tan Siewert Date: Sat, 16 Nov 2024 09:32:49 -0800 Subject: [PATCH 331/482] redfish_command: add update_custom_oem options (#9123) * redfish_command: add update_custom_oem options The Multipart HTTP push update implementation allows OEM specific parts that are not part of the `UpdateParameters` body part, but a separate one. This OEM part shall start with `Oem` and is optional. The OEM part implementation is specified in the Redfish spec point 12.6.2.2 [1]. Right now, the implementation will only support JSON as MIME Type, although it is not limited to JSON. [1] https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html#oem Signed-off-by: Tan Siewert * redfish_command: add option to set custom mime type The implementation of using a custom MIME type will also remove the default JSON type. Converting the payload to JSON or any other type is up to the user. 
Signed-off-by: Tan Siewert * redfish_command: apply docs changes from review Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * redfish_command: add mime type option to changelog Co-authored-by: Felix Fontein --------- Signed-off-by: Tan Siewert Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- ...9123-redfish-command-custom-oem-params.yml | 2 + plugins/module_utils/redfish_utils.py | 8 +++ plugins/modules/redfish_command.py | 54 +++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 changelogs/fragments/9123-redfish-command-custom-oem-params.yml diff --git a/changelogs/fragments/9123-redfish-command-custom-oem-params.yml b/changelogs/fragments/9123-redfish-command-custom-oem-params.yml new file mode 100644 index 0000000000..a09219515a --- /dev/null +++ b/changelogs/fragments/9123-redfish-command-custom-oem-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add ``update_custom_oem_header``, ``update_custom_oem_params``, and ``update_custom_oem_mime_type`` options (https://github.com/ansible-collections/community.general/pull/9123). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 9f638c51f4..388fc93669 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1933,6 +1933,9 @@ class RedfishUtils(object): targets = update_opts.get('update_targets') apply_time = update_opts.get('update_apply_time') oem_params = update_opts.get('update_oem_params') + custom_oem_header = update_opts.get('update_custom_oem_header') + custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type') + custom_oem_params = update_opts.get('update_custom_oem_params') # Ensure the image file is provided if not image_file: @@ -1969,6 +1972,11 @@ class RedfishUtils(object): 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'}, 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'} } + if custom_oem_params: + multipart_payload[custom_oem_header] = {'content': custom_oem_params} + if custom_oem_mime_type: + multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type + response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True) if response['ret'] is False: return response diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index 103f9e1d50..829b77897d 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -216,6 +216,36 @@ options: - Handle to check the status of an update in progress. type: str version_added: '6.1.0' + update_custom_oem_header: + required: false + description: + - Optional OEM header, sent as separate form-data for + the Multipart HTTP push update. + - The header shall start with "Oem" according to DMTF + Redfish spec 12.6.2.2. + - For more details, see U(https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html) + - If set, then O(update_custom_oem_params) is required too. 
+ type: str + version_added: '10.1.0' + update_custom_oem_params: + required: false + description: + - Custom OEM properties for HTTP Multipart Push updates. + - If set, then O(update_custom_oem_header) is required too. + - The properties will be passed raw without any validation or conversion by Ansible. + This means the content can be a file, a string, or any other data. + If the content is a dict that should be converted to JSON, then the + content must be converted to JSON before passing it to this module using the + P(ansible.builtin.to_json#filter) filter. + type: raw + version_added: '10.1.0' + update_custom_oem_mime_type: + required: false + description: + - MIME Type for custom OEM properties for HTTP Multipart + Push updates. + type: str + version_added: '10.1.0' virtual_media: required: false description: @@ -654,6 +684,23 @@ EXAMPLES = ''' update_oem_params: PreserveConfiguration: false + - name: Multipart HTTP push with custom OEM options + vars: + oem_payload: + ImageType: BMC + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_file: ~/images/myupdate.img + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_custom_oem_header: OemParameters + update_custom_oem_mime_type: "application/json" + update_custom_oem_params: "{{ oem_payload | to_json }}" + - name: Perform requested operations to continue the update community.general.redfish_command: category: Update @@ -863,6 +910,9 @@ def main(): update_protocol=dict(), update_targets=dict(type='list', elements='str', default=[]), update_oem_params=dict(type='dict'), + update_custom_oem_header=dict(type='str'), + update_custom_oem_mime_type=dict(type='str'), + update_custom_oem_params=dict(type='raw'), update_creds=dict( type='dict', options=dict( @@ -895,6 +945,7 @@ def main(): ), required_together=[ ('username', 'password'), + 
('update_custom_oem_header', 'update_custom_oem_params'), ], required_one_of=[ ('username', 'auth_token'), @@ -941,6 +992,9 @@ def main(): 'update_creds': module.params['update_creds'], 'update_apply_time': module.params['update_apply_time'], 'update_oem_params': module.params['update_oem_params'], + 'update_custom_oem_header': module.params['update_custom_oem_header'], + 'update_custom_oem_params': module.params['update_custom_oem_params'], + 'update_custom_oem_mime_type': module.params['update_custom_oem_mime_type'], 'update_handle': module.params['update_handle'], } From 737717d015f5e8ab6878662cacfa82cd39cd0e07 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Sat, 16 Nov 2024 17:33:35 +0000 Subject: [PATCH 332/482] launchd: Add plist option (#9102) This allows the module to be used with services such as com.openssh.sshd, when the name of the plist file doesn't match the service name. fixes #5932 --- changelogs/fragments/5932-launchd-plist.yml | 2 ++ plugins/modules/launchd.py | 31 +++++++++++++++++---- 2 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/5932-launchd-plist.yml diff --git a/changelogs/fragments/5932-launchd-plist.yml b/changelogs/fragments/5932-launchd-plist.yml new file mode 100644 index 0000000000..bf2530841a --- /dev/null +++ b/changelogs/fragments/5932-launchd-plist.yml @@ -0,0 +1,2 @@ +minor_changes: + - launchd - add ``plist`` option for services such as sshd, where the plist filename doesn't match the service name (https://github.com/ansible-collections/community.general/pull/9102). diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py index a6427bdb2f..9717825c71 100644 --- a/plugins/modules/launchd.py +++ b/plugins/modules/launchd.py @@ -30,6 +30,12 @@ options: - Name of the service. type: str required: true + plist: + description: + - Name of the V(.plist) file for the service. + - Defaults to V({name}.plist). 
+ type: str + version_added: 10.1.0 state: description: - V(started)/V(stopped) are idempotent actions that will not run @@ -100,6 +106,12 @@ EXAMPLES = r''' community.general.launchd: name: org.memcached state: unloaded + +- name: restart sshd + community.general.launchd: + name: com.openssh.sshd + plist: ssh.plist + state: restarted ''' RETURN = r''' @@ -145,25 +157,31 @@ class ServiceState: class Plist: - def __init__(self, module, service): + def __init__(self, module, service, filename=None): self.__changed = False self.__service = service + if filename is not None: + self.__filename = filename + else: + self.__filename = '%s.plist' % service state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() # Check if readPlist is available or not self.old_plistlib = hasattr(plistlib, 'readPlist') - self.__file = self.__find_service_plist(self.__service) + self.__file = self.__find_service_plist(self.__filename) if self.__file is None: - msg = 'Unable to infer the path of %s service plist file' % self.__service + msg = 'Unable to find the plist file %s for service %s' % ( + self.__filename, self.__service, + ) if pid is None and state == ServiceState.UNLOADED: msg += ' and it was not found among active services' module.fail_json(msg=msg) self.__update(module) @staticmethod - def __find_service_plist(service_name): + def __find_service_plist(filename): """Finds the plist file associated with a service""" launchd_paths = [ @@ -180,7 +198,6 @@ class Plist: except OSError: continue - filename = '%s.plist' % service_name if filename in files: return os.path.join(path, filename) return None @@ -461,6 +478,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), + plist=dict(type='str'), state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']), enabled=dict(type='bool'), force_stop=dict(type='bool', default=False), @@ -472,6 +490,7 @@ def main(): ) service = module.params['name'] + 
plist_filename = module.params['plist'] action = module.params['state'] rc = 0 out = err = '' @@ -483,7 +502,7 @@ def main(): # We will tailor the plist file in case one of the options # (enabled, force_stop) was specified. - plist = Plist(module, service) + plist = Plist(module, service, plist_filename) result['changed'] = plist.is_changed() # Gather information about the service to be controlled. From 523439ab629eb942073de749b1ef2a1e0becd836 Mon Sep 17 00:00:00 2001 From: Stanislav Shamilov Date: Sat, 16 Nov 2024 19:34:09 +0200 Subject: [PATCH 333/482] alternatives: add support for "family" parameter (#9096) * alternatives: added parsing and setting of 'family' for an alternative * alternatives: added checks for path nullability * alternatives: added idempotence when setting alternative using family * alternatives: added family to diff mode * alternatives: added tests for family * alternatives: updated documentation and examples * alternatives: added constraints for 'path' and 'family' parameters. 
in any invariants at least one of the parameters must be specified * alternatives: added changelog fragment * removed unnecessary check * added version Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...9096-alternatives-add-family-parameter.yml | 2 + plugins/modules/alternatives.py | 60 +++++++++++++---- .../targets/alternatives/tasks/main.yml | 6 ++ .../alternatives/tasks/tests_family.yml | 65 +++++++++++++++++++ 4 files changed, 120 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/9096-alternatives-add-family-parameter.yml create mode 100644 tests/integration/targets/alternatives/tasks/tests_family.yml diff --git a/changelogs/fragments/9096-alternatives-add-family-parameter.yml b/changelogs/fragments/9096-alternatives-add-family-parameter.yml new file mode 100644 index 0000000000..a0b021f892 --- /dev/null +++ b/changelogs/fragments/9096-alternatives-add-family-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - alternatives - add ``family`` parameter that allows to utilize the ``--family`` option available in RedHat version of update-alternatives (https://github.com/ansible-collections/community.general/issues/5060, https://github.com/ansible-collections/community.general/pull/9096). diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py index da578276fa..d049c82b11 100644 --- a/plugins/modules/alternatives.py +++ b/plugins/modules/alternatives.py @@ -39,7 +39,11 @@ options: description: - The path to the real executable that the link should point to. type: path - required: true + family: + description: + - The family groups similar alternatives. This option is available only on RHEL-based distributions. + type: str + version_added: 10.1.0 link: description: - The path to the symbolic link that should point to the real executable. 
@@ -98,6 +102,12 @@ EXAMPLES = r''' name: java path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java +- name: Select java-11-openjdk.x86_64 family + community.general.alternatives: + name: java + family: java-11-openjdk.x86_64 + when: ansible_os_family == 'RedHat' + - name: Alternatives link created community.general.alternatives: name: hadoop-conf @@ -182,17 +192,25 @@ class AlternativesModule(object): subcommands_parameter = self.module.params['subcommands'] priority_parameter = self.module.params['priority'] if ( - self.path not in self.current_alternatives or - (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or - (subcommands_parameter is not None and ( - not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or - not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter) - )) + self.path is not None and ( + self.path not in self.current_alternatives or + (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or + (subcommands_parameter is not None and ( + not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or + not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter) + )) + ) ): self.install() # Check if we need to set the preference - if self.mode_selected and self.current_path != self.path: + is_same_path = self.path is not None and self.current_path == self.path + is_same_family = False + if self.current_path is not None and self.current_path in self.current_alternatives: + current_alternative = self.current_alternatives[self.current_path] + is_same_family = current_alternative.get('family') == self.family + + if self.mode_selected and not (is_same_path or is_same_family): self.set() # Check if we need to reset to auto @@ -213,6 +231,8 @@ class 
AlternativesModule(object): self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link') cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)] + if self.family is not None: + cmd.extend(["--family", self.family]) if self.module.params['subcommands'] is not None: subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands] @@ -228,6 +248,7 @@ class AlternativesModule(object): self.result['diff']['after'] = dict( state=AlternativeState.PRESENT, path=self.path, + family=self.family, priority=self.priority, link=self.link, ) @@ -248,9 +269,15 @@ class AlternativesModule(object): self.result['diff']['after'] = dict(state=AlternativeState.ABSENT) def set(self): - cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path] + # Path takes precedence over family as it is more specific + if self.path is None: + arg = self.family + else: + arg = self.path + + cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg] self.result['changed'] = True - self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name)) + self.messages.append("Set alternative '%s' for '%s'." 
% (arg, self.name)) if not self.module.check_mode: self.module.run_command(cmd, check_rc=True) @@ -277,6 +304,10 @@ class AlternativesModule(object): def path(self): return self.module.params.get('path') + @property + def family(self): + return self.module.params.get('family') + @property def link(self): return self.module.params.get('link') or self.current_link @@ -321,7 +352,7 @@ class AlternativesModule(object): current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE) subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE) - alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE) match = current_mode_regex.search(display_output) @@ -346,9 +377,10 @@ class AlternativesModule(object): if not subcmd_path_map and self.subcommands: subcmd_path_map = {s['name']: s['link'] for s in self.subcommands} - for path, prio, subcmd in alternative_regex.findall(display_output): + for path, family, prio, subcmd in alternative_regex.findall(display_output): self.current_alternatives[path] = dict( priority=int(prio), + family=family, subcommands=[dict( name=name, path=spath, @@ -383,7 +415,8 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), - path=dict(type='path', required=True), + path=dict(type='path'), + family=dict(type='str'), link=dict(type='path'), priority=dict(type='int'), state=dict( @@ -398,6 +431,7 @@ def main(): )), ), supports_check_mode=True, + required_one_of=[('path', 'family')] ) AlternativesModule(module) diff --git a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml index 81d6a7b0df..cd86b085d4 100644 --- 
a/tests/integration/targets/alternatives/tasks/main.yml +++ b/tests/integration/targets/alternatives/tasks/main.yml @@ -58,6 +58,12 @@ - include_tasks: remove_links.yml - include_tasks: tests_state.yml + # Test for the family parameter + - block: + - include_tasks: remove_links.yml + - include_tasks: tests_family.yml + when: ansible_os_family == 'RedHat' + # Cleanup always: - include_tasks: remove_links.yml diff --git a/tests/integration/targets/alternatives/tasks/tests_family.yml b/tests/integration/targets/alternatives/tasks/tests_family.yml new file mode 100644 index 0000000000..ac4eadebe1 --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/tests_family.yml @@ -0,0 +1,65 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Add an alternative with a family + alternatives: + name: dummy + path: /usr/bin/dummy1 + link: /usr/bin/dummy + family: family1 + priority: 100 + state: selected + +- name: Ensure that the alternative has family assigned + shell: 'grep family1 {{ alternatives_dir }}/dummy' + +- name: Add two alternatives with different families + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item.n }}' + link: /usr/bin/dummy + family: family2 + priority: "{{ item.priority }}" + state: present + loop: + - { n: 2, priority: 20 } + - { n: 3, priority: 10 } + - { n: 4, priority: 5 } + +# Here we select the whole family of alternatives +- name: Set family as an alternatives + alternatives: + name: dummy + family: family2 + state: selected + +- name: Ensure manual mode + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual"' + +- name: Execute the current dummy command + shell: dummy + register: cmd + +# Despite the fact that there is alternative with higher priority (/usr/bin/dummy1), +# it is not chosen as it doesn't belong to the selected family +- name: Ensure that the 
alternative from the selected family is used + assert: + that: + - cmd.stdout == "dummy2" + +- name: Remove the alternative with the highest priority that belongs to the family + alternatives: + name: dummy + path: '/usr/bin/dummy2' + state: absent + +- name: Execute the current dummy command + shell: dummy + register: cmd + +- name: Ensure that the next alternative is selected as having the highest priority from the family + assert: + that: + - cmd.stdout == "dummy3" From 626c0e104975f2bfe1c85e8816f4b64594e06bed Mon Sep 17 00:00:00 2001 From: Thibaut Decombe <68703331+UnknownPlatypus@users.noreply.github.com> Date: Sat, 16 Nov 2024 18:34:47 +0100 Subject: [PATCH 334/482] Remove redundant `HomebrewValidate.valid_package` calls in homebrew module. (#9076) * Remove redundant `HomebrewValidate.valid_package` calls * Add changelog fragment --- ...cated-homebrew-package-name-validation.yml | 2 + plugins/modules/homebrew.py | 37 +------------------ 2 files changed, 3 insertions(+), 36 deletions(-) create mode 100644 changelogs/fragments/9076-remove-duplicated-homebrew-package-name-validation.yml diff --git a/changelogs/fragments/9076-remove-duplicated-homebrew-package-name-validation.yml b/changelogs/fragments/9076-remove-duplicated-homebrew-package-name-validation.yml new file mode 100644 index 0000000000..b067625c0c --- /dev/null +++ b/changelogs/fragments/9076-remove-duplicated-homebrew-package-name-validation.yml @@ -0,0 +1,2 @@ +minor_changes: + - homebrew - remove duplicated package name validation (https://github.com/ansible-collections/community.general/pull/9076). 
diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 58b13f83d4..bc5d8649e7 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -401,11 +401,6 @@ class Homebrew(object): # checks ------------------------------------------------------- {{{ def _current_package_is_installed(self): - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - cmd = [ "{brew_path}".format(brew_path=self.brew_path), "info", @@ -424,9 +419,6 @@ class Homebrew(object): return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks") def _current_package_is_outdated(self): - if not HomebrewValidate.valid_package(self.current_package): - return False - rc, out, err = self.module.run_command([ self.brew_path, 'outdated', @@ -436,9 +428,7 @@ class Homebrew(object): return rc != 0 def _current_package_is_installed_from_head(self): - if not HomebrewValidate.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): + if not self._current_package_is_installed(): return False rc, out, err = self.module.run_command([ @@ -534,11 +524,6 @@ class Homebrew(object): # installed ------------------------------ {{{ def _install_current_package(self): - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - if self._current_package_is_installed(): self.unchanged_count += 1 self.unchanged_pkgs.append(self.current_package) @@ -595,11 +580,6 @@ class Homebrew(object): def _upgrade_current_package(self): command = 'upgrade' - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - 
current_package_is_installed = self._current_package_is_installed() if not current_package_is_installed: command = 'install' @@ -667,11 +647,6 @@ class Homebrew(object): # uninstalled ---------------------------- {{{ def _uninstall_current_package(self): - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - if not self._current_package_is_installed(): self.unchanged_count += 1 self.unchanged_pkgs.append(self.current_package) @@ -716,11 +691,6 @@ class Homebrew(object): # linked --------------------------------- {{{ def _link_current_package(self): - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - if not self._current_package_is_installed(): self.failed = True self.message = 'Package not installed: {0}.'.format(self.current_package) @@ -763,11 +733,6 @@ class Homebrew(object): # unlinked ------------------------------- {{{ def _unlink_current_package(self): - if not HomebrewValidate.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - if not self._current_package_is_installed(): self.failed = True self.message = 'Package not installed: {0}.'.format(self.current_package) From 36c7e56005b2e78dff9bec3b3a605e5834661128 Mon Sep 17 00:00:00 2001 From: Andrew Hyatt <4400272+ahyattdev@users.noreply.github.com> Date: Sun, 17 Nov 2024 09:27:53 -0500 Subject: [PATCH 335/482] dnf_config_manager: use --assumeyes when changing state (#9124) * dnf_config_manager: use --assumeyesm when changing state * changelog fragment * update tests * format fix * Update changelogs/fragments/9124-dnf_config_manager.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> 
--------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/9124-dnf_config_manager.yml | 2 ++ plugins/modules/dnf_config_manager.py | 2 +- tests/unit/plugins/modules/test_dnf_config_manager.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/9124-dnf_config_manager.yml diff --git a/changelogs/fragments/9124-dnf_config_manager.yml b/changelogs/fragments/9124-dnf_config_manager.yml new file mode 100644 index 0000000000..9c87f02d64 --- /dev/null +++ b/changelogs/fragments/9124-dnf_config_manager.yml @@ -0,0 +1,2 @@ +bugfixes: + - dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, https://github.com/ansible-collections/community.general/issues/8830). diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py index 069fd0ddc7..9ec439c225 100644 --- a/plugins/modules/dnf_config_manager.py +++ b/plugins/modules/dnf_config_manager.py @@ -153,7 +153,7 @@ def get_repo_states(module): def set_repo_states(module, repo_ids, state): - module.run_command([DNF_BIN, 'config-manager', '--set-{0}'.format(state)] + repo_ids, check_rc=True) + module.run_command([DNF_BIN, 'config-manager', '--assumeyes', '--set-{0}'.format(state)] + repo_ids, check_rc=True) def pack_repo_states_for_return(states): diff --git a/tests/unit/plugins/modules/test_dnf_config_manager.py b/tests/unit/plugins/modules/test_dnf_config_manager.py index 90bffe4365..7b231e10a5 100644 --- a/tests/unit/plugins/modules/test_dnf_config_manager.py +++ b/tests/unit/plugins/modules/test_dnf_config_manager.py @@ -254,8 +254,8 @@ expected_repo_states_crb_disabled = {'disabled': ['appstream-debuginfo', 'rpmfusion-nonfree-updates']} call_get_repo_states = call(['/usr/bin/dnf', 'repolist', '--all', '--verbose'], check_rc=True) -call_disable_crb = call(['/usr/bin/dnf', 'config-manager', '--set-disabled', 'crb'], 
check_rc=True) -call_enable_crb = call(['/usr/bin/dnf', 'config-manager', '--set-enabled', 'crb'], check_rc=True) +call_disable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-disabled', 'crb'], check_rc=True) +call_enable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-enabled', 'crb'], check_rc=True) class TestDNFConfigManager(ModuleTestCase): From 2a66ac719a618fb24098611263b6aeb3a850ba44 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 19:36:38 +0100 Subject: [PATCH 336/482] build(deps): bump fsfe/reuse-action from 4 to 5 (#9143) Bumps [fsfe/reuse-action](https://github.com/fsfe/reuse-action) from 4 to 5. - [Release notes](https://github.com/fsfe/reuse-action/releases) - [Commits](https://github.com/fsfe/reuse-action/compare/v4...v5) --- updated-dependencies: - dependency-name: fsfe/reuse-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/reuse.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml index e5195f6dcf..7c6f76bd5b 100644 --- a/.github/workflows/reuse.yml +++ b/.github/workflows/reuse.yml @@ -31,4 +31,4 @@ jobs: ref: ${{ github.event.pull_request.head.sha || '' }} - name: REUSE Compliance Check - uses: fsfe/reuse-action@v4 + uses: fsfe/reuse-action@v5 From dc856ab6fe5667c79621d178dffd31f2e78347d4 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Mon, 18 Nov 2024 20:42:14 +0100 Subject: [PATCH 337/482] filters/dict: document the correct return value (#9145) --- plugins/filter/dict.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 720c9def96..3e0558bb61 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -57,8 +57,8 @@ EXAMPLES = ''' RETURN = ''' _value: - description: The dictionary having the provided key-value pairs. - type: boolean + description: A dictionary with the provided key-value pairs. + type: dictionary ''' From 54194ccb2427cccf282eea1a20294dd794ce77dc Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Tue, 19 Nov 2024 20:08:16 +0100 Subject: [PATCH 338/482] modprobe: document when 'persistent' was added. (#9144) This is based on `git tag --contains 29f5033737a7fd86349ff3daab7d7ee7db66ad00`. --- plugins/modules/modprobe.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index 57e682245f..3d6a7c2410 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -46,6 +46,7 @@ options: type: str choices: [ disabled, absent, present ] default: disabled + version_added: 7.0.0 description: - Persistency between reboots for configured module. 
- This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. From 5a9715874af655d0b3f7180bcd2775987c3fef7f Mon Sep 17 00:00:00 2001 From: Ian Richardson <126898943+PredatarIan@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:07:06 +0000 Subject: [PATCH 339/482] Update docs for github_app_access_token.py (#9152) * Update github_app_access_token.py updating docs - github_token missing {{ }} * Update plugins/lookup/github_app_access_token.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/lookup/github_app_access_token.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index 1d3c526c33..cee635fc0a 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -55,8 +55,8 @@ EXAMPLES = ''' dest: /srv/checkout vars: github_token: >- - lookup('community.general.github_app_access_token', key_path='/home/to_your/key', - app_id='123456', installation_id='64209') + {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', + app_id='123456', installation_id='64209') }} ''' RETURN = ''' From 4b0d5cb8cfc9722d62ba35de9367109d1bf2f18f Mon Sep 17 00:00:00 2001 From: Stanislav Shamilov Date: Wed, 20 Nov 2024 20:08:34 +0200 Subject: [PATCH 340/482] dnf_config_manager: fix parsing for non-english locales (#9157) * dnf_config_manager: forces locale to 'C' when the module starts * adds changelog fragment * Apply suggestions from code review Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/9157-fix-dnf_config_manager-locale.yml | 2 ++ plugins/modules/dnf_config_manager.py | 1 + 2 files changed, 3 insertions(+) create mode 100644 
changelogs/fragments/9157-fix-dnf_config_manager-locale.yml diff --git a/changelogs/fragments/9157-fix-dnf_config_manager-locale.yml b/changelogs/fragments/9157-fix-dnf_config_manager-locale.yml new file mode 100644 index 0000000000..f2084dfa5f --- /dev/null +++ b/changelogs/fragments/9157-fix-dnf_config_manager-locale.yml @@ -0,0 +1,2 @@ +bugfixes: + - dnf_config_manager - forces locale to ``C`` before module starts. If the locale was set to non-English, the output of the ``dnf config-manager`` could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, https://github.com/ansible-collections/community.general/issues/9046). \ No newline at end of file diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py index 9ec439c225..aa2571d9f0 100644 --- a/plugins/modules/dnf_config_manager.py +++ b/plugins/modules/dnf_config_manager.py @@ -186,6 +186,7 @@ def main(): argument_spec=module_args, supports_check_mode=True ) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') if not os.path.exists(DNF_BIN): module.fail_json(msg="%s was not found" % DNF_BIN) From 8078a08f72eb4b327bdf3cd6c58f87f28e884177 Mon Sep 17 00:00:00 2001 From: Spencer Boyer Date: Thu, 21 Nov 2024 14:34:06 -0600 Subject: [PATCH 341/482] Add server-side artifact fetching to proxmox_template module (#9113) * Add server-side artifact fetching to proxmox_template module * Update docs, format per feedback. 
* Formatting plugins/modules/proxmox_template.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: spencer Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- ...support-server-side-artifact-fetching.yaml | 2 + plugins/modules/proxmox_template.py | 75 ++++++++++++++++--- 2 files changed, 65 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/7402-proxmox-template-support-server-side-artifact-fetching.yaml diff --git a/changelogs/fragments/7402-proxmox-template-support-server-side-artifact-fetching.yaml b/changelogs/fragments/7402-proxmox-template-support-server-side-artifact-fetching.yaml new file mode 100644 index 0000000000..4a5fefdc96 --- /dev/null +++ b/changelogs/fragments/7402-proxmox-template-support-server-side-artifact-fetching.yaml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_template - add server side artifact fetching support (https://github.com/ansible-collections/community.general/pull/9113). \ No newline at end of file diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py index 134286164c..876e8a6847 100644 --- a/plugins/modules/proxmox_template.py +++ b/plugins/modules/proxmox_template.py @@ -30,8 +30,14 @@ options: src: description: - Path to uploaded file. - - Required only for O(state=present). + - Exactly one of O(src) or O(url) is required for O(state=present). type: path + url: + description: + - URL to file to download + - Exactly one of O(src) or O(url) is required for O(state=present). + type: str + version_added: 10.1.0 template: description: - The template name. 
@@ -85,6 +91,14 @@ EXAMPLES = ''' api_host: node1 src: ~/ubuntu-14.04-x86_64.tar.gz +- name: Pull new openvz template with minimal options + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz + - name: > Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) @@ -105,6 +119,17 @@ EXAMPLES = ''' src: ~/ubuntu-14.04-x86_64.tar.gz force: true +- name: Pull new openvz template with all options and force overwrite + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + storage: local + content_type: vztmpl + url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz + force: true + - name: Delete template with minimal options community.general.proxmox_template: node: uk-mc02 @@ -132,6 +157,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.six.moves.urllib.parse import urlparse REQUESTS_TOOLBELT_ERR = None try: @@ -179,6 +205,17 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible): except Exception as e: self.module.fail_json(msg="Uploading template %s failed with error: %s" % (realpath, e)) + def fetch_template(self, node, storage, content_type, url, timeout): + """Fetch a template from a web url source using the proxmox download-url endpoint + """ + try: + taskid = self.proxmox_api.nodes(node).storage(storage)("download-url").post( + url=url, content=content_type, filename=os.path.basename(url) + ) + return self.task_status(node, taskid, timeout) + except Exception as e: + self.module.fail_json(msg="Fetching template from url %s failed with error: %s" % 
(url, e)) + def download_template(self, node, storage, template, timeout): try: taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template) @@ -205,6 +242,7 @@ def main(): template_args = dict( node=dict(), src=dict(type='path'), + url=dict(), template=dict(), content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']), storage=dict(default='local'), @@ -218,7 +256,8 @@ def main(): argument_spec=module_args, required_together=[('api_token_id', 'api_token_secret')], required_one_of=[('api_password', 'api_token_id')], - required_if=[('state', 'absent', ['template'])] + required_if=[('state', 'absent', ['template'])], + mutually_exclusive=[("src", "url")], ) proxmox = ProxmoxTemplateAnsible(module) @@ -231,9 +270,10 @@ def main(): if state == 'present': content_type = module.params['content_type'] src = module.params['src'] + url = module.params['url'] # download appliance template - if content_type == 'vztmpl' and not src: + if content_type == 'vztmpl' and not (src or url) : template = module.params['template'] if not template: @@ -245,16 +285,27 @@ def main(): if proxmox.download_template(node, storage, template, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template)) - template = os.path.basename(src) - if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) - elif not src: - module.fail_json(msg='src param to uploading template file is mandatory') - elif not (os.path.exists(src) and os.path.isfile(src)): - module.fail_json(msg='template file on path %s not exists' % src) + if not src and not url: + module.fail_json(msg='src or url param for uploading template file is mandatory') + elif not url: + template = os.path.basename(src) + if proxmox.has_template(node, storage, content_type, template) and not 
module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) + elif not (os.path.exists(src) and os.path.isfile(src)): + module.fail_json(msg='template file on path %s not exists' % src) - if proxmox.upload_template(node, storage, content_type, src, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + if proxmox.upload_template(node, storage, content_type, src, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + elif not src: + template = os.path.basename(urlparse(url).path) + if proxmox.has_template(node, storage, content_type, template): + if not module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) + elif not proxmox.delete_template(node, storage, content_type, template, timeout): + module.fail_json(changed=False, msg='failed to delete template with volid=%s:%s/%s' % (storage, content_type, template)) + + if proxmox.fetch_template(node, storage, content_type, url, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) elif state == 'absent': try: From bf6ae7bf59564d8d210848b1e1e88c122453c5df Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 22 Nov 2024 18:36:16 +1300 Subject: [PATCH 342/482] pipx: add testcase for upgrade --global (#9170) --- tests/integration/targets/pipx/tasks/main.yml | 3 ++ .../pipx/tasks/testcase-8793-global.yml | 2 +- .../tasks/testcase-9103-upgrade-global.yml | 38 +++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml diff --git a/tests/integration/targets/pipx/tasks/main.yml 
b/tests/integration/targets/pipx/tasks/main.yml index e764f17f68..25380fcb17 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -263,3 +263,6 @@ - name: Include testcase for PR 9009 injectpkg --global ansible.builtin.include_tasks: testcase-9009-fixglobal.yml + + - name: Include testcase for PR 9103 upgrade --global + ansible.builtin.include_tasks: testcase-9103-upgrade-global.yml diff --git a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml index 7d3c871306..b9bf8b75f6 100644 --- a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml +++ b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml @@ -5,7 +5,7 @@ - name: Set up environment environment: - PATH: /usr/local/bin:{{ ansible_env.PATH }} + PATH: /root/.local/bin:/usr/local/bin:{{ ansible_env.PATH }} block: - name: Remove global pipx dir ansible.builtin.file: diff --git a/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml b/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml new file mode 100644 index 0000000000..60621a42be --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml @@ -0,0 +1,38 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: 9103-Ensure application hello-world is uninstalled + community.general.pipx: + name: hello-world + state: absent + global: true + +- name: 9103-Install application hello-world + community.general.pipx: + name: hello-world + source: hello-world==0.1 + global: true + register: install_hw + +- name: 9103-Upgrade application hello-world + community.general.pipx: + state: upgrade + name: hello-world + global: true + register: upgrade_hw + +- name: 9103-Ensure application pylint is uninstalled + 
community.general.pipx: + name: pylint + state: absent + global: true + +- name: 9103-Assertions + ansible.builtin.assert: + that: + - install_hw is changed + - upgrade_hw is changed + - upgrade_hw.cmd[-3] == "upgrade" + - upgrade_hw.cmd[-2] == "--global" From 47637cdec765a84098b1977e1653cf23e2600fe4 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 23 Nov 2024 10:47:22 +1300 Subject: [PATCH 343/482] locale_gen: add testcase for de_CH.utf8 (#9176) --- tests/integration/targets/locale_gen/vars/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/targets/locale_gen/vars/main.yml b/tests/integration/targets/locale_gen/vars/main.yml index 23358e6374..6d4f7de60d 100644 --- a/tests/integration/targets/locale_gen/vars/main.yml +++ b/tests/integration/targets/locale_gen/vars/main.yml @@ -24,3 +24,6 @@ locale_list_basic: - tr_CY.UTF-8 - tr_CY.utf8 skip_removal: false + - localegen: de_CH.UTF-8 + locales: [de_CH.utf8] + skip_removal: false From 152339a8f9e7c3986399b7c4eeab86a617d0ae54 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 24 Nov 2024 03:03:56 +1300 Subject: [PATCH 344/482] gio_mime: fix bug when looking for version (#9171) * gio_mime: fix bug when looking for version * add changelog frag --- changelogs/fragments/9171-gio-mime-fix-version.yml | 2 ++ plugins/module_utils/gio_mime.py | 5 +++-- plugins/modules/gio_mime.py | 5 ++--- tests/unit/plugins/modules/test_gio_mime.yaml | 6 +++--- 4 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/9171-gio-mime-fix-version.yml diff --git a/changelogs/fragments/9171-gio-mime-fix-version.yml b/changelogs/fragments/9171-gio-mime-fix-version.yml new file mode 100644 index 0000000000..ca9dbddd7f --- /dev/null +++ b/changelogs/fragments/9171-gio-mime-fix-version.yml @@ -0,0 +1,2 @@ +bugfixes: + - gio_mime - fix command line when determining version of ``gio`` 
(https://github.com/ansible-collections/community.general/pull/9171, https://github.com/ansible-collections/community.general/issues/9158). diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py index 132981a339..c734e13a81 100644 --- a/plugins/module_utils/gio_mime.py +++ b/plugins/module_utils/gio_mime.py @@ -12,8 +12,9 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor def gio_mime_runner(module, **kwargs): return CmdRunner( module, - command=['gio', 'mime'], + command=['gio'], arg_formats=dict( + mime=cmd_runner_fmt.as_fixed('mime'), mime_type=cmd_runner_fmt.as_list(), handler=cmd_runner_fmt.as_list(), version=cmd_runner_fmt.as_fixed('--version'), @@ -29,5 +30,5 @@ def gio_mime_get(runner, mime_type): out = out.splitlines()[0] return out.split()[-1] - with runner("mime_type", output_process=process) as ctx: + with runner("mime mime_type", output_process=process) as ctx: return ctx.run(mime_type=mime_type) diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 587aaec427..a3b340b677 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -108,12 +108,11 @@ class GioMime(ModuleHelper): def __run__(self): check_mode_return = (0, 'Module executed in check mode', '') if self.vars.has_changed: - with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: rc, out, err = ctx.run() self.vars.stdout = out self.vars.stderr = err - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set("run_info", ctx.run_info, verbosity=4) def main(): diff --git a/tests/unit/plugins/modules/test_gio_mime.yaml b/tests/unit/plugins/modules/test_gio_mime.yaml index f71e595701..3645446291 100644 --- a/tests/unit/plugins/modules/test_gio_mime.yaml +++ 
b/tests/unit/plugins/modules/test_gio_mime.yaml @@ -13,7 +13,7 @@ changed: true mocks: run_command: - - command: [/testbin/gio, mime, --version] + - command: [/testbin/gio, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 out: "2.80.0\n" @@ -40,7 +40,7 @@ skip: test helper does not support check mode yet mocks: run_command: - - command: [/testbin/gio, mime, --version] + - command: [/testbin/gio, --version] environ: *env-def rc: 0 out: "2.80.0\n" @@ -65,7 +65,7 @@ changed: false mocks: run_command: - - command: [/testbin/gio, mime, --version] + - command: [/testbin/gio, --version] environ: *env-def rc: 0 out: "2.80.0\n" From a3bd49c01064fc68a505081e4dd78997b41e0fa9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:09:53 +1300 Subject: [PATCH 345/482] deps module utils: unit tests + minor improvement (#9179) * deps module utils: unit tests + minor improvement * deps.clear() calls dict.clear() instead of creating new dict * add changelog frag --- changelogs/fragments/9179-deps-tests.yml | 2 + plugins/module_utils/deps.py | 4 ++ tests/unit/plugins/module_utils/test_deps.py | 69 +++++++++++++++++++ .../module_utils/test_module_helper.py | 5 ++ tests/unit/plugins/modules/conftest.py | 2 +- 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9179-deps-tests.yml create mode 100644 tests/unit/plugins/module_utils/test_deps.py diff --git a/changelogs/fragments/9179-deps-tests.yml b/changelogs/fragments/9179-deps-tests.yml new file mode 100644 index 0000000000..1ddf109033 --- /dev/null +++ b/changelogs/fragments/9179-deps-tests.yml @@ -0,0 +1,2 @@ +minor_changes: + - deps module utils - add ``deps.clear()`` to clear out previously declared dependencies (https://github.com/ansible-collections/community.general/pull/9179). 
diff --git a/plugins/module_utils/deps.py b/plugins/module_utils/deps.py index a2413d1952..66847ccd25 100644 --- a/plugins/module_utils/deps.py +++ b/plugins/module_utils/deps.py @@ -96,3 +96,7 @@ def validate(module, spec=None): def failed(spec=None): return any(_deps[d].failed for d in _select_names(spec)) + + +def clear(): + _deps.clear() diff --git a/tests/unit/plugins/module_utils/test_deps.py b/tests/unit/plugins/module_utils/test_deps.py new file mode 100644 index 0000000000..70f0eac0fd --- /dev/null +++ b/tests/unit/plugins/module_utils/test_deps.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# (c) 2024, Alexei Znamensky +# Copyright (c) 2024 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat.mock import MagicMock + +import pytest + +from ansible_collections.community.general.plugins.module_utils import deps + + +@pytest.fixture +def module(): + m = MagicMock() + m.fail_json.side_effect = RuntimeError + return m + + +def test_wrong_name(module): + with deps.declare("sys") as sys_dep: + import sys # noqa: F401, pylint: disable=unused-import + + with pytest.raises(KeyError): + deps.validate(module, "wrong_name") + + +def test_fail_potatoes(module): + with deps.declare("potatoes", reason="Must have potatoes") as potatoes_dep: + import potatoes_that_will_never_be_there # noqa: F401, pylint: disable=unused-import + + with pytest.raises(RuntimeError): + deps.validate(module) + + assert potatoes_dep.failed is True + assert potatoes_dep.message.startswith("Failed to import the required Python library") + + +def test_sys(module): + with deps.declare("sys") as sys_dep: + import sys # noqa: F401, pylint: disable=unused-import + + deps.validate(module) + + assert sys_dep.failed is False + + 
+def test_multiple(module): + with deps.declare("mpotatoes", reason="Must have mpotatoes"): + import potatoes_that_will_never_be_there # noqa: F401, pylint: disable=unused-import + + with deps.declare("msys", reason="Must have msys"): + import sys # noqa: F401, pylint: disable=unused-import + + deps.validate(module, "msys") + deps.validate(module, "-mpotatoes") + + with pytest.raises(RuntimeError): + deps.validate(module) + + with pytest.raises(RuntimeError): + deps.validate(module, "-msys") + + with pytest.raises(RuntimeError): + deps.validate(module, "mpotatoes") diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index b1e2eafc7f..cbcdaae788 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -14,6 +14,7 @@ from ansible_collections.community.general.plugins.module_utils.module_helper im ) +# remove in 11.0.0 def test_dependency_ctxmgr(): ctx = DependencyCtxMgr("POTATOES", "Potatoes must be installed") with ctx: @@ -36,6 +37,7 @@ def test_dependency_ctxmgr(): assert ctx.has_it +# remove in 11.0.0 def test_variable_meta(): meta = VarMeta() assert meta.output is True @@ -51,6 +53,7 @@ def test_variable_meta(): assert meta.diff_result is None +# remove in 11.0.0 def test_variable_meta_diff(): meta = VarMeta(diff=True) assert meta.output is True @@ -70,6 +73,7 @@ def test_variable_meta_diff(): assert meta.diff_result == {"before": "abc", "after": "ghi"} +# remove in 11.0.0 def test_vardict(): vd = VarDict() vd.set('a', 123) @@ -99,6 +103,7 @@ def test_vardict(): assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) +# remove in 11.0.0 def test_variable_meta_change(): vd = VarDict() vd.set('a', 123, change=True) diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index 6e96c58316..d357137651 100644 --- 
a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -48,4 +48,4 @@ def patch_ansible_module(request, mocker): @pytest.fixture(autouse=True) def deps_cleanup(): - deps._deps.clear() + deps.clear() From a9449ccc2e97a37348f65a8280c1ee71ec97c117 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:10:19 +1300 Subject: [PATCH 346/482] pipx/pipx_info: add return value version (#9180) * pipx/pipx_info: add return value version * add changelog frag --- changelogs/fragments/9180-pipx-version.yml | 3 ++ plugins/module_utils/pipx.py | 37 ++++++++++--------- plugins/modules/pipx.py | 13 +++++++ plugins/modules/pipx_info.py | 10 +++++ tests/integration/targets/pipx/tasks/main.yml | 1 + 5 files changed, 46 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/9180-pipx-version.yml diff --git a/changelogs/fragments/9180-pipx-version.yml b/changelogs/fragments/9180-pipx-version.yml new file mode 100644 index 0000000000..f07d66c83c --- /dev/null +++ b/changelogs/fragments/9180-pipx-version.yml @@ -0,0 +1,3 @@ +minor_changes: + - pipx - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180). + - pipx_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180). 
diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 75b6621c1b..de43f80b40 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -10,7 +10,7 @@ __metaclass__ = type import json -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt pipx_common_argspec = { @@ -40,24 +40,25 @@ _state_map = dict( def pipx_runner(module, command, **kwargs): arg_formats = dict( - state=fmt.as_map(_state_map), - name=fmt.as_list(), - name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), - install_apps=fmt.as_bool("--include-apps"), - install_deps=fmt.as_bool("--include-deps"), - inject_packages=fmt.as_list(), - force=fmt.as_bool("--force"), - include_injected=fmt.as_bool("--include-injected"), - index_url=fmt.as_opt_val('--index-url'), - python=fmt.as_opt_val('--python'), - system_site_packages=fmt.as_bool("--system-site-packages"), - _list=fmt.as_fixed(['list', '--include-injected', '--json']), - editable=fmt.as_bool("--editable"), - pip_args=fmt.as_opt_eq_val('--pip-args'), - suffix=fmt.as_opt_val('--suffix'), - spec_metadata=fmt.as_list(), + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + name_source=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=cmd_runner_fmt.as_bool("--include-apps"), + install_deps=cmd_runner_fmt.as_bool("--include-deps"), + inject_packages=cmd_runner_fmt.as_list(), + force=cmd_runner_fmt.as_bool("--force"), + include_injected=cmd_runner_fmt.as_bool("--include-injected"), + index_url=cmd_runner_fmt.as_opt_val('--index-url'), + python=cmd_runner_fmt.as_opt_val('--python'), + system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"), + _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']), + 
editable=cmd_runner_fmt.as_bool("--editable"), + pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'), + suffix=cmd_runner_fmt.as_opt_val('--suffix'), + spec_metadata=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), ) - arg_formats["global"] = fmt.as_bool("--global") + arg_formats["global"] = cmd_runner_fmt.as_bool("--global") runner = CmdRunner( module, diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 9bde0f180c..1706f125d9 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -190,6 +190,15 @@ EXAMPLES = """ with_items: "{{ pipx_packages }}" """ +RETURN = """ +version: + description: Version of pipx. + type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 +""" + from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list @@ -272,6 +281,10 @@ class PipX(StateModuleHelper): self.vars.set('application', self._retrieve_installed(), change=True, diff=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + def __quit_module__(self): self.vars.application = self._retrieve_installed() diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 33fbad0e5d..24a6739024 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -119,6 +119,13 @@ cmd: type: list elements: str sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"] + +version: + description: Version of pipx. 
+ type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -149,6 +156,9 @@ class PipXInfo(ModuleHelper): facts = ansible_facts(self.module, gather_subset=['python']) self.command = [facts['python']['executable'], '-m', 'pipx'] self.runner = pipx_runner(self.module, self.command) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() def __run__(self): output_process = make_process_list(self, **self.vars.as_dict()) diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index 25380fcb17..0e04826371 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -57,6 +57,7 @@ assert: that: - install_tox is changed + - "'version' in install_tox" - "'tox' in install_tox.application" - install_tox_again is not changed - install_tox_again_force is changed From 3c23ce4a7bcdd08e172a58669842d4faec63d7c5 Mon Sep 17 00:00:00 2001 From: Sergio <45396489+Sergio-IME@users.noreply.github.com> Date: Sun, 24 Nov 2024 22:10:51 +0100 Subject: [PATCH 347/482] `cloudflare_dns`: add support for `comment` and `tags` (#9132) * `cloudflare_dns`: add support for `comment` and `tags` * `cloudflare_dns`: add return values for `comment`/`tags` fields * `cloudflare_dns`: fix return values samples * `cloudflare_dns`: changelog fragment formatting * `cloudflare_dns`: add missing `version_added` * `cloudflare_dns`: remove explicit `required: false` * `cloudflare_dns`: empty `comment` idempotency fix --- .../9132-cloudflare_dns-comment-and-tags.yml | 2 + plugins/modules/cloudflare_dns.py | 66 +++++++++++++++++-- 2 files changed, 63 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/9132-cloudflare_dns-comment-and-tags.yml diff --git a/changelogs/fragments/9132-cloudflare_dns-comment-and-tags.yml 
b/changelogs/fragments/9132-cloudflare_dns-comment-and-tags.yml new file mode 100644 index 0000000000..b601e39f55 --- /dev/null +++ b/changelogs/fragments/9132-cloudflare_dns-comment-and-tags.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - add support for ``comment`` and ``tags`` (https://github.com/ansible-collections/community.general/pull/9132). diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index 86550966be..a2bcc79f8e 100644 --- a/plugins/modules/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -31,7 +31,6 @@ options: - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. type: str - required: false version_added: '0.2.0' account_api_key: description: @@ -39,13 +38,11 @@ options: - Required for api keys authentication. - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." type: str - required: false aliases: [ account_api_token ] account_email: description: - Account email. Required for API keys authentication. type: str - required: false algorithm: description: - Algorithm number. @@ -57,6 +54,11 @@ options: - Required for O(type=TLSA) when O(state=present). type: int choices: [ 0, 1, 2, 3 ] + comment: + description: + - Comments or notes about the DNS record. + type: str + version_added: 10.1.0 flag: description: - Issuer Critical Flag. @@ -134,6 +136,12 @@ options: type: str choices: [ absent, present ] default: present + tags: + description: + - Custom tags for the DNS record. + type: list + elements: str + version_added: 10.1.0 timeout: description: - Timeout for Cloudflare API calls. 
@@ -191,6 +199,18 @@ EXAMPLES = r''' value: 127.0.0.1 api_token: dummyapitoken +- name: Create a record with comment and tags + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + comment: Local test website + tags: + - test + - local + api_token: dummyapitoken + - name: Create a example.net CNAME record to example.com community.general.cloudflare_dns: zone: example.net @@ -299,6 +319,18 @@ record: returned: success, except on record deletion type: complex contains: + comment: + description: Comments or notes about the DNS record. + returned: success + type: str + sample: Domain verification record + version_added: 10.1.0 + comment_modified_on: + description: When the record comment was last modified. Omitted if there is no comment. + returned: success + type: str + sample: "2024-01-01T05:20:00.12345Z" + version_added: 10.1.0 content: description: The record content (details depend on record type). returned: success @@ -333,7 +365,7 @@ record: type: bool sample: false meta: - description: No documentation available. + description: Extra Cloudflare-specific information about the record. returned: success type: dict sample: { auto_added: false } @@ -362,6 +394,19 @@ record: returned: success type: bool sample: false + tags: + description: Custom tags for the DNS record. + returned: success + type: list + elements: str + sample: ['production', 'app'] + version_added: 10.1.0 + tags_modified_on: + description: When the record tags were last modified. Omitted if there are no tags. + returned: success + type: str + sample: "2025-01-01T05:20:00.12345Z" + version_added: 10.1.0 ttl: description: The time-to-live for the record. 
returned: success @@ -410,9 +455,11 @@ class CloudflareAPI(object): self.account_email = module.params['account_email'] self.algorithm = module.params['algorithm'] self.cert_usage = module.params['cert_usage'] + self.comment = module.params['comment'] self.hash_type = module.params['hash_type'] self.flag = module.params['flag'] self.tag = module.params['tag'] + self.tags = module.params['tags'] self.key_tag = module.params['key_tag'] self.port = module.params['port'] self.priority = module.params['priority'] @@ -662,7 +709,7 @@ class CloudflareAPI(object): def ensure_dns_record(self, **kwargs): params = {} for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']: + 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag', 'tags', 'comment']: if param in kwargs: params[param] = kwargs[param] else: @@ -798,6 +845,9 @@ class CloudflareAPI(object): } search_value = None + new_record['comment'] = params['comment'] or None + new_record['tags'] = params['tags'] or [] + zone_id = self._get_zone_id(params['zone']) records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) # in theory this should be impossible as cloudflare does not allow @@ -826,6 +876,10 @@ class CloudflareAPI(object): do_update = True if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']): do_update = True + if cur_record['comment'] != new_record['comment']: + do_update = True + if sorted(cur_record['tags']) != sorted(new_record['tags']): + do_update = True if do_update: if self.module.check_mode: result = new_record @@ -856,11 +910,13 @@ def main(): account_email=dict(type='str', required=False), algorithm=dict(type='int'), cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + comment=dict(type='str'), hash_type=dict(type='int', choices=[1, 2]), key_tag=dict(type='int', no_log=False), 
port=dict(type='int'), flag=dict(type='int', choices=[0, 1]), tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']), + tags=dict(type='list', elements='str'), priority=dict(type='int', default=1), proto=dict(type='str'), proxied=dict(type='bool', default=False), From fa7c2df4b861dd49b7176b8e48662fee9910c41f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:27:21 +1300 Subject: [PATCH 348/482] ufw: add note to docs about concurrency (#9191) * ufw: add note to docs about concurrency * wordsmithing --- plugins/modules/ufw.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py index 7a90647979..fba0ef5fe2 100644 --- a/plugins/modules/ufw.py +++ b/plugins/modules/ufw.py @@ -23,6 +23,10 @@ author: - Ahti Kitsik (@ahtik) notes: - See C(man ufw) for more examples. + - > + B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, + as firewall rules are meant to be ordered and parallel executions do not guarantee order. + B(Do not use concurrency:) The results are unpredictable and the module may fail silently if you do. 
requirements: - C(ufw) package extends_documentation_fragment: From b49830861cfbba4d513ef61a832440f20e11aaef Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:27:42 +1300 Subject: [PATCH 349/482] scaleway_lb: simplify function logic (#9189) * scaleway_lb: simplify function logic * add changelog frag --- changelogs/fragments/9189-scalway-lb-simplify-return.yml | 2 ++ plugins/modules/scaleway_lb.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9189-scalway-lb-simplify-return.yml diff --git a/changelogs/fragments/9189-scalway-lb-simplify-return.yml b/changelogs/fragments/9189-scalway-lb-simplify-return.yml new file mode 100644 index 0000000000..39d161f06b --- /dev/null +++ b/changelogs/fragments/9189-scalway-lb-simplify-return.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_lb - minor simplification in the code (https://github.com/ansible-collections/community.general/pull/9189). 
diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 1083b6da9e..6cc947755d 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -228,8 +228,8 @@ def lb_attributes_should_be_changed(target_lb, wished_lb): if diff: return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES} - else: - return diff + + return {} def present_strategy(api, wished_lb): From a863b62859541f03d04e9feaaedd4cef9d2e9ff5 Mon Sep 17 00:00:00 2001 From: tomcperry <99128358+tomcperry@users.noreply.github.com> Date: Thu, 28 Nov 2024 06:42:57 +0100 Subject: [PATCH 350/482] fixing broken check mode in the github_key (#9186) * fixing datetime.strftime() * added changelog fragment * Update changelogs/fragments/9186-fix-broken-check-mode-in-github-key.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/9186-fix-broken-check-mode-in-github-key.yml | 2 ++ plugins/modules/github_key.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9186-fix-broken-check-mode-in-github-key.yml diff --git a/changelogs/fragments/9186-fix-broken-check-mode-in-github-key.yml b/changelogs/fragments/9186-fix-broken-check-mode-in-github-key.yml new file mode 100644 index 0000000000..dbf1f145d5 --- /dev/null +++ b/changelogs/fragments/9186-fix-broken-check-mode-in-github-key.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_key - in check mode, a faulty call to ```datetime.strftime(...)``` was being made which generated an exception (https://github.com/ansible-collections/community.general/issues/9185). 
\ No newline at end of file diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py index a74ead9848..b071e7b339 100644 --- a/plugins/modules/github_key.py +++ b/plugins/modules/github_key.py @@ -162,7 +162,7 @@ def create_key(session, name, pubkey, check_mode): 'key': pubkey, 'title': name, 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), + 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), 'read_only': False, 'verified': False } From f828bdee2287d603158407bd7ea3f843240dfefa Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 28 Nov 2024 18:50:39 +1300 Subject: [PATCH 351/482] flatpak: force locale language to be C (#9187) * flatpak: force locale langauge to be C * add changelog frag --- changelogs/fragments/9187-flatpak-lang.yml | 2 ++ plugins/modules/flatpak.py | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 changelogs/fragments/9187-flatpak-lang.yml diff --git a/changelogs/fragments/9187-flatpak-lang.yml b/changelogs/fragments/9187-flatpak-lang.yml new file mode 100644 index 0000000000..159923cbdc --- /dev/null +++ b/changelogs/fragments/9187-flatpak-lang.yml @@ -0,0 +1,2 @@ +bugfixes: + - flatpak - force the locale language to ``C`` when running the flatpak command (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883). diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index 09e49e5575..84e4ea8374 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -419,6 +419,8 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + installed, not_installed = flatpak_exists(module, binary, name, method) if state == 'absent' and installed: uninstall_flat(module, binary, installed, method) From 420f78de2fea14de761b4e326fa500544139e3a9 Mon Sep 17 00:00:00 2001 From: shios86 <17804953+shios86@users.noreply.github.com> Date: Thu, 28 Nov 2024 06:51:21 +0100 Subject: [PATCH 352/482] Add the options apply_live to rpm_ostree_pkg (#9167) rpm_ostree_pkg: add support for `apply_live` and return value `needs_reboot` #9167 --- .../9167-rpm_ostree_pkg-apply_live.yml | 3 ++ plugins/modules/rpm_ostree_pkg.py | 37 ++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9167-rpm_ostree_pkg-apply_live.yml diff --git a/changelogs/fragments/9167-rpm_ostree_pkg-apply_live.yml b/changelogs/fragments/9167-rpm_ostree_pkg-apply_live.yml new file mode 100644 index 0000000000..e473dedd0b --- /dev/null +++ b/changelogs/fragments/9167-rpm_ostree_pkg-apply_live.yml @@ -0,0 +1,3 @@ +minor_changes: +- rpm_ostree_pkg - added the options ``apply_live`` (https://github.com/ansible-collections/community.general/pull/9167). +- rpm_ostree_pkg - added the return value ``needs_reboot`` (https://github.com/ansible-collections/community.general/pull/9167). diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py index 1a02b2d71c..8c5c693f60 100644 --- a/plugins/modules/rpm_ostree_pkg.py +++ b/plugins/modules/rpm_ostree_pkg.py @@ -40,6 +40,14 @@ options: choices: [ 'absent', 'present' ] default: 'present' type: str + apply_live: + description: + - Adds the options C(--apply-live) when O(state=present). + - Option is ignored when O(state=absent). + - For more information, please see U(https://coreos.github.io/rpm-ostree/apply-live/). 
+ type: bool + default: false + version_added: 10.1.0 author: - Dusty Mabe (@dustymabe) - Abhijeet Kasurde (@Akasurde) @@ -56,6 +64,12 @@ EXAMPLES = r''' name: nfs-utils state: absent +- name: Apply the overlay package live + community.general.rpm_ostree: + name: nfs-utils + state: present + apply_live: true + # In case a different transaction is currently running the module would fail. # Adding a delay can help mitigate this problem: - name: Install overlay package @@ -104,6 +118,12 @@ cmd: returned: always type: str sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' +needs_reboot: + description: Determine if machine needs a reboot to apply current changes. + returned: success + type: bool + sample: true + version_added: 10.1.0 ''' from ansible.module_utils.basic import AnsibleModule @@ -124,19 +144,24 @@ class RpmOstreePkg: stdout='', stderr='', cmd='', + needs_reboot=False, ) # Ensure rpm-ostree command exists cmd = [self.module.get_bin_path('rpm-ostree', required=True)] # Decide action to perform - if self.state in ('present'): + if self.state == 'present': results['action'] = 'install' cmd.append('install') - elif self.state in ('absent'): + elif self.state == 'absent': results['action'] = 'uninstall' cmd.append('uninstall') + # Add the options to the command line + if self.params['apply_live'] and self.state == 'present': + cmd.extend(['--apply-live', '--assumeyes']) + # Additional parameters cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77']) for pkg in self.params['name']: @@ -145,6 +170,10 @@ class RpmOstreePkg: rc, out, err = self.module.run_command(cmd) + # Determine if system needs a reboot to apply change + if 'Changes queued for next boot. 
Run "systemctl reboot" to start a reboot' in out: + results['needs_reboot'] = True + results.update(dict( rc=rc, cmd=' '.join(cmd), @@ -180,6 +209,10 @@ def main(): type='list', elements='str', ), + apply_live=dict( + type='bool', + default=False, + ), ), ) From c38b4749825ec83c88226537dccd1bc1f71af077 Mon Sep 17 00:00:00 2001 From: IamLunchbox <56757745+IamLunchbox@users.noreply.github.com> Date: Mon, 2 Dec 2024 20:06:08 +0100 Subject: [PATCH 353/482] Add backup module for proxmox (#9197) * Defined configuration variables, main backup function todo * Defined configuration variables, main backup function todo * wip * permission checks and basic flow done, final request missing * ansible-test and unit test open * Improve documentation * fix pep8 errors * remove f-string and fix bugs through manual testing * longer full example * improve docs * error message for fail + timeout * move sleep location * remove residual debugger * include newline for better readability * more linting errors fixed * Include UPIDs as return value * Output logs as comma separated value, move exception and create new abstraction for api calls * pretter logs * Update project to final version * Remove accidential placeholder for integration test * Fix missing explizit string in docstring * Reorder imports below docstrings * remove type annotations and fix indendation of options dict * prettier idendation and aplhabetic ordering of options dict * aplhabetic ordering of docstring options * Remove the rest of type hinting as well :( * fix version * improve documentation * add change detection mode * refactor list comprehension to filter function * remove storage availability check for node * refactor to quotation marks * Fix trailing newline and incorrect RV usage * rollback filter plugin * Remove action_group reference and add proxmox_backup to meta/runtime.yml * Include note about missing idempotency --------- Co-authored-by: IamLunchbox --- .github/BOTMETA.yml | 2 + meta/runtime.yml | 1 + 
plugins/modules/proxmox_backup.py | 635 ++++++++++++++++++ .../plugins/modules/test_proxmox_backup.py | 366 ++++++++++ 4 files changed, 1004 insertions(+) create mode 100644 plugins/modules/proxmox_backup.py create mode 100644 tests/unit/plugins/modules/test_proxmox_backup.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 989752a0a7..594f01349a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1117,6 +1117,8 @@ files: $modules/proxmox_kvm.py: ignore: skvidal maintainers: helldorado krauthosting + $modules/proxmox_backup.py: + maintainers: IamLunchbox $modules/proxmox_nic.py: maintainers: Kogelvis krauthosting $modules/proxmox_node_info.py: diff --git a/meta/runtime.yml b/meta/runtime.yml index f5adb64712..387ab0f22b 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -16,6 +16,7 @@ action_groups: - consul_token proxmox: - proxmox + - proxmox_backup - proxmox_disk - proxmox_domain_info - proxmox_group_info diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py new file mode 100644 index 0000000000..fb5750383e --- /dev/null +++ b/plugins/modules/proxmox_backup.py @@ -0,0 +1,635 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024, IamLunchbox +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: proxmox_backup +author: "Raphael Grieger (@IamLunchbox) " +short_description: Start a VM backup in Proxmox VE cluster +version_added: 10.1.0 +description: + - Allows you to create backups of KVM and LXC guests in Proxmox VE cluster. + - Offers the GUI functionality of creating a single backup as well as using the run-now functionality from the cluster backup schedule. 
+ - The mininum required privileges to use this module are C(VM.Backup) and C(Datastore.AllocateSpace) for the respective VMs and storage. + - Most options are optional and if unspecified will be chosen by the Cluster and its default values. + - Note that this module B(is not idempotent). It always starts a new backup (when not in check mode). +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + backup_mode: + description: + - The mode how Proxmox performs backups. The default is, to create a runtime snapshot including memory. + - Check U(https://pve.proxmox.com/pve-docs/chapter-vzdump.html#_backup_modes) for an explanation of the differences. + type: str + choices: ["snapshot", "suspend", "stop"] + default: snapshot + bandwidth: + description: + - Limit the I/O bandwidth (in KiB/s) to write backup. V(0) is unlimited. + type: int + change_detection_mode: + description: + - Set the change detection mode (available from Proxmox VE 8.3). + - > + Is only used when backing up containers, + Proxmox silently ignores this option when applied to kvm guests. + type: str + choices: ["legacy", "data", "metadata"] + compress: + description: + - Enable additional compression of the backup archive. + - V(0) will use the Proxmox recommended value, depending on your storage target. + type: str + choices: ["0", "1", "gzip", "lzo", "zstd"] + compression_threads: + description: + - The number of threads zstd will use to compress the backup. + - V(0) uses 50% of the available cores, anything larger than V(0) will use exactly as many threads. + - Is ignored if you specify O(compress=gzip) or O(compress=lzo). + type: int + description: + description: + - Specify the description of the backup. + - Needs to be a single line, newline and backslash need to be escaped as V(\\n) and V(\\\\) respectively. + - > + If you need variable interpolation, you can set the content as usual + through ansible jinja templating and/or let Proxmox substitute templates. 
+ - > + Proxmox currently supports V({{cluster}}), V({{guestname}}), + V({{node}}), and V({{vmid}}) as templating variables. + Since this is also a jinja delimiter, you need to set these values as raw jinja. + default: "{{guestname}}" + type: str + fleecing: + description: + - Enable backup fleecing. Works only for virtual machines and their disks. + - Must be entered as a string, containing key-value pairs in a list. + type: str + mode: + description: + - Specifices the mode to select backup targets. + choices: ["include", "all", "pool"] + required: true + type: str + node: + description: + - Only execute the backup job for the given node. + - This option is usually used if O(mode=all). + - If you specify a node ID and your vmids or pool do not reside there, they will not be backed up! + type: str + notification_mode: + description: + - Determine which notification system to use. + type: str + choices: ["auto","legacy-sendmail", "notification-system"] + default: auto + performance_tweaks: + description: + - Enable other performance-related settings. + - Must be entered as a string, containing comma separated key-value pairs. + - "For example: V(max-workers=2,pbs-entries-max=2)." + type: str + pool: + description: + - Specify a pool name to limit backups to guests to the given pool. + - Required, when O(mode=pool). + - Also required, when your user only has VM.Backup permission for this single pool. + type: str + protected: + description: + - Marks backups as protected. + - > + "Might fail, when the PBS backend has verify enabled + due to this bug: U(https://bugzilla.proxmox.com/show_bug.cgi?id=4289)" + type: bool + retention: + description: + - > + Use custom retention options instead of those from the default cluster + configuration (which is usually V("keep-all")). + - Always requires Datastore.Allocate permission at the storage endpoint. 
+ - > + Specifying a retention time other than V(keep-all=1) might trigger pruning on the datastore, + if an existing backup should be deleted target due to your specified timeframe. + - Deleting requires C(Datastore.Modify) or C(Datastore.Prune) permissions on the backup storage. + type: str + storage: + description: + - Store the backup archive on this storage. + type: str + required: true + vmids: + description: + - The instance ids to be backed up. + - Only valid, if O(mode=include). + type: list + elements: int + wait: + description: + - Wait for the backup to be finished. + - Fails, if job does not succeed successfully within the given timeout. + type: bool + default: false + wait_timeout: + description: + - Seconds to wait for the backup to be finished. + - Will only be evaluated, if O(wait=true). + type: int + default: 10 +requirements: ["proxmoxer", "requests"] +extends_documentation_fragment: + - community.general.proxmox.actiongroup_proxmox + - community.general.proxmox.documentation + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Backup all vms in the Proxmox cluster to storage mypbs + community.general.proxmox_backup: + api_user: root@pam + api_password: secret + api_host: node1 + storage: mypbs + mode: all + +- name: Backup VMID 100 by stopping it and set an individual retention + community.general.proxmox_backup: + api_user: root@pam + api_password: secret + api_host: node1 + backup-mode: stop + mode: include + retention: keep-daily=5, keep-last=14, keep-monthly=4, keep-weekly=4, keep-yearly=0 + storage: mypbs + vmid: [100] + +- name: Backup all vms on node node2 to storage mypbs and wait for the task to finish + community.general.proxmox_backup: + api_user: test@pve + api_password: 1q2w3e + api_host: node2 + storage: mypbs + mode: all + node: node2 + wait: true + wait_timeout: 30 + +- name: Use all the options + community.general.proxmox_backup: + api_user: root@pam + api_password: secret + api_host: node1 + bandwidth: 1000 + 
backup_mode: suspend + compress: zstd + compression_threads: 0 + description: A single backup for {% raw %}{{ guestname }}{% endraw %} + mode: include + notification_mode: notification-system + protected: true + retention: keep-monthly=1, keep-weekly=1 + storage: mypbs + vmids: + - 100 + - 101 +''' + +RETURN = r''' +backups: + description: List of nodes and their task IDs. + returned: on success + type: list + elements: dict + contains: + node: + description: Node ID. + returned: on success + type: str + status: + description: Last known task status. Will be unknown, if O(wait=false). + returned: on success + type: str + choices: ["unknown", "success", "failed"] + upid: + description: > + Proxmox cluster UPID, which is needed to lookup task info. + Returns OK, when a cluster node did not create a task after being called, + e.g. due to no matching targets. + returned: on success + type: str +''' + +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible) +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule + +import time + + +class ProxmoxBackupAnsible(ProxmoxAnsible): + + def _get_permissions(self): + return self.proxmox_api.access.permissions.get() + + def _get_resources(self, resource_type=None): + return self.proxmox_api.cluster.resources.get(type=resource_type) + + def _get_tasklog(self, node, upid): + return self.proxmox_api.nodes(node).tasks(upid).log.get() + + def _get_taskok(self, node, upid): + return self.proxmox_api.nodes(node).tasks(upid).status.get() + + def _post_vzdump(self, node, request_body): + return self.proxmox_api.nodes(node).vzdump.post(**request_body) + + def request_backup( + self, + request_body, + node_endpoints): + task_ids = [] + + for node in node_endpoints: + upid = self._post_vzdump(node, request_body) + if upid != "OK": + tasklog = ", ".join( + [logentry["t"] for logentry in 
self._get_tasklog(node, upid)]) + else: + tasklog = "" + task_ids.extend( + [{"node": node, "upid": upid, "status": "unknown", "log": "%s" % tasklog}]) + return task_ids + + def check_relevant_nodes(self, node): + nodes = [item["node"] for item in self._get_resources( + "node") if item["status"] == "online"] + if node and node not in nodes: + self.module.fail_json( + msg="Node %s was specified, but does not exist on the cluster" % + node) + elif node: + return [node] + return nodes + + def check_storage_permissions( + self, + permissions, + storage, + bandwidth, + performance, + retention): + # Check for Datastore.AllocateSpace in the permission tree + if "/" in permissions.keys() and permissions["/"].get( + "Datastore.AllocateSpace", 0) == 1: + pass + elif "/storage" in permissions.keys() and permissions["/storage"].get("Datastore.AllocateSpace", 0) == 1: + pass + elif "/storage/" + storage in permissions.keys() and permissions["/storage/" + storage].get("Datastore.AllocateSpace", 0) == 1: + pass + else: + self.module.fail_json( + changed=False, + msg="Insufficient permission: Datastore.AllocateSpace is missing") + if (bandwidth or performance) and permissions["/"].get( + "Sys.Modify", 0) == 0: + self.module.fail_json( + changed=False, + msg="Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'") + if retention: + if "/" in permissions.keys() and permissions["/"].get( + "Datastore.Allocate", 0) == 1: + pass + elif "/storage" in permissions.keys() and permissions["/storage"].get("Datastore.Allocate", 0) == 1: + pass + elif "/storage/" + storage in permissions.keys() and permissions["/storage/" + storage].get("Datastore.Allocate", 0) == 1: + pass + else: + self.module.fail_json( + changed=False, + msg="Insufficient permissions: Custom retention was requested, but Datastore.Allocate is missing") + + def check_vmid_backup_permission(self, permissions, vmids, pool): + sufficient_permissions = False + if "/" in 
permissions.keys() and permissions["/"].get( + "VM.Backup", 0) == 1: + sufficient_permissions = True + elif "/vms" in permissions.keys() and permissions["/"].get( + "VM.Backup", 0) == 1: + sufficient_permissions = True + elif pool and "/pool/" + pool in permissions.keys() and permissions["/pool/" + pool].get( + "VM.Backup", 0) == 1: + sufficient_permissions = True + elif pool and "/pool/" + pool + "/vms" in permissions.keys() and permissions["/pool/" + pool + "/vms"].get( + "VM.Backup", 0) == 1: + sufficient_permissions = True + + if not sufficient_permissions: + # Since VM.Backup can be given for each vmid at a time, iterate through all of them + # and check, if the permission is set + failed_vmids = [] + for vm in vmids: + if "/vms/" + \ + str(vm) in permissions.keys() and permissions["/vms/" + str(vm)].get("VM.Backup", 1) == 0: + failed_vmids.append(str(vm)) + if failed_vmids: + self.module.fail_json( + changed=False, msg="Insufficient permissions: " + "You dont have the VM.Backup permission for VMID %s" % + ", ".join(failed_vmids)) + sufficient_permissions = True + # Finally, when no check succeeded, fail + if not sufficient_permissions: + self.module.fail_json( + changed=False, + msg="Insufficient permissions: You do not have the VM.Backup permission") + + def check_general_backup_permission(self, permissions, pool): + if "/" in permissions.keys() and permissions["/"].get( + "VM.Backup", 0) == 1: + pass + elif "/vms" in permissions.keys() and permissions["/vms"].get("VM.Backup", 0) == 1: + pass + elif pool and "/pool/" + pool in permissions.keys() and permissions["/pool/" + pool].get( + "VM.Backup", 0) == 1: + pass + else: + self.module.fail_json( + changed=False, + msg="Insufficient permissions: You dont have the VM.Backup permission") + + def check_if_storage_exists(self, storage, node): + storages = self.get_storages(type=None) + # Loop through all cluster storages and get all matching storages + validated_storagepath = [storageentry for storageentry in 
storages if storageentry["storage"] == storage] + if not validated_storagepath: + self.module.fail_json( + changed=False, + msg="Storage %s does not exist in the cluster" % + storage) + + def check_vmids(self, vmids): + cluster_vmids = [vm["vmid"] for vm in self._get_resources("vm")] + if not cluster_vmids: + self.module.warn( + "VM.Audit permission is missing or there are no VMs. This task might fail if one VMID does not exist") + return + vmids_not_found = [str(vm) for vm in vmids if vm not in cluster_vmids] + if vmids_not_found: + self.module.warn( + "VMIDs %s not found. This task will fail if one VMID does not exist" % + ", ".join(vmids_not_found)) + + def wait_for_timeout(self, timeout, raw_tasks): + + # filter all entries, which did not get a task id from the Cluster + tasks = [] + ok_tasks = [] + for node in raw_tasks: + if node["upid"] != "OK": + tasks.append(node) + else: + ok_tasks.append(node) + + start_time = time.time() + # iterate through the task ids and check their values + while True: + for node in tasks: + if node["status"] == "unknown": + try: + # proxmox.api_task_ok does not suffice, since it only + # is true at `stopped` and `ok` + status = self._get_taskok(node["node"], node["upid"]) + if status["status"] == "stopped" and status["exitstatus"] == "OK": + node["status"] = "success" + if status["status"] == "stopped" and status["exitstatus"] in ( + "job errors",): + node["status"] = "failed" + except Exception as e: + self.module.fail_json( + msg="Unable to retrieve API task ID from node %s: %s" % + (node["node"], e)) + if len([item for item in tasks if item["status"] + != "unknown"]) == len(tasks): + break + if time.time() > start_time + timeout: + timeouted_nodes = [node["node"] + for node in tasks if node["status"] == "unknown"] + failed_nodes = [node["node"] + for node in tasks if node["status"] == "failed"] + if failed_nodes: + self.module.fail_json( + msg="Reached timeout while waiting for backup task. 
" + "Nodes, who reached the timeout: %s. " + "Nodes, which failed: %s" % + (", ".join(timeouted_nodes), ", ".join(failed_nodes))) + self.module.fail_json( + msg="Reached timeout while waiting for creating VM snapshot. " + "Nodes who reached the timeout: %s" % + ", ".join(timeouted_nodes)) + time.sleep(1) + + error_logs = [] + for node in tasks: + if node["status"] == "failed": + tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog( + node["node"], node["upid"])]) + error_logs.append("%s: %s" % (node, tasklog)) + if error_logs: + self.module.fail_json( + msg="An error occured creating the backups. " + "These are the last log lines from the failed nodes: %s" % + ", ".join(error_logs)) + + for node in tasks: + tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog( + node["node"], node["upid"])]) + node["log"] = "%s" % tasklog + + # Finally, reattach ok tasks to show, that all nodes were contacted + tasks.extend(ok_tasks) + return tasks + + def permission_check( + self, + storage, + mode, + node, + bandwidth, + performance_tweaks, + retention, + pool, + vmids): + permissions = self._get_permissions() + self.check_if_storage_exists(storage, node) + self.check_storage_permissions( + permissions, storage, bandwidth, performance_tweaks, retention) + if mode == "include": + self.check_vmid_backup_permission(permissions, vmids, pool) + else: + self.check_general_backup_permission(permissions, pool) + + def prepare_request_parameters(self, module_arguments): + # ensure only valid post parameters are passed to proxmox + # list of dict items to replace with (new_val, old_val) + post_params = [("bwlimit", "bandwidth"), + ("compress", "compress"), + ("fleecing", "fleecing"), + ("mode", "backup_mode"), + ("notes-template", "description"), + ("notification-mode", "notification_mode"), + ("pbs-change-detection-mode", "change_detection_mode"), + ("performance", "performance_tweaks"), + ("pool", "pool"), + ("protected", "protected"), + ("prune-backups", 
"retention"), + ("storage", "storage"), + ("zstd", "compression_threads"), + ("vmid", "vmids")] + request_body = {} + for new, old in post_params: + if module_arguments.get(old): + request_body.update({new: module_arguments[old]}) + + # Set mode specific values + if module_arguments["mode"] == "include": + request_body.pop("pool", None) + request_body["all"] = 0 + elif module_arguments["mode"] == "all": + request_body.pop("vmid", None) + request_body.pop("pool", None) + request_body["all"] = 1 + elif module_arguments["mode"] == "pool": + request_body.pop("vmid", None) + request_body["all"] = 0 + + # Create comma separated list from vmids, the API expects so + if request_body.get("vmid"): + request_body.update( + {"vmid": ",".join([str(vmid) for vmid in request_body.get("vmid")])}) + + # remove whitespaces from option strings + for key in ("prune-backups", "performance"): + if request_body.get(key): + request_body[key] = request_body[key].replace(" ", "") + # convert booleans to 0/1 + for key in ("protected",): + if request_body.get(key): + request_body[key] = 1 + return request_body + + def backup_create( + self, + module_arguments, + check_mode, + node_endpoints): + request_body = self.prepare_request_parameters(module_arguments) + # stop here, before anything gets changed + if check_mode: + return [] + + task_ids = self.request_backup(request_body, node_endpoints) + updated_task_ids = [] + if module_arguments["wait"]: + updated_task_ids = self.wait_for_timeout( + module_arguments["wait_timeout"], task_ids) + return updated_task_ids if updated_task_ids else task_ids + + +def main(): + module_args = proxmox_auth_argument_spec() + backup_args = { + "backup_mode": {"type": "str", "default": "snapshot", "choices": [ + "snapshot", "suspend", "stop" + ]}, + "bandwidth": {"type": "int"}, + "change_detection_mode": {"type": "str", "choices": [ + "legacy", "data", "metadata" + ]}, + "compress": {"type": "str", "choices": [ + "0", "1", "gzip", "lzo", "zstd" + ]}, + 
"compression_threads": {"type": "int"}, + "description": {"type": "str", "default": "{{guestname}}"}, + "fleecing": {"type": "str"}, + "mode": {"type": "str", "required": True, "choices": [ + "include", "all", "pool" + ]}, + "node": {"type": "str"}, + "notification_mode": {"type": "str", "default": "auto", "choices": [ + "auto", "legacy-sendmail", "notification-system" + ]}, + "performance_tweaks": {"type": "str"}, + "pool": {"type": "str"}, + "protected": {"type": "bool"}, + "retention": {"type": "str"}, + "storage": {"type": "str", "required": True}, + "vmids": {"type": "list", "elements": "int"}, + "wait": {"type": "bool", "default": False}, + "wait_timeout": {"type": "int", "default": 10}} + module_args.update(backup_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[ + ("mode", "include", ("vmids",), True), + ("mode", "pool", ("pool",)) + ] + ) + proxmox = ProxmoxBackupAnsible(module) + bandwidth = module.params["bandwidth"] + mode = module.params["mode"] + node = module.params["node"] + performance_tweaks = module.params["performance_tweaks"] + pool = module.params["pool"] + retention = module.params["retention"] + storage = module.params["storage"] + vmids = module.params["vmids"] + + proxmox.permission_check( + storage, + mode, + node, + bandwidth, + performance_tweaks, + retention, + pool, + vmids) + if module.params["mode"] == "include": + proxmox.check_vmids(module.params["vmids"]) + node_endpoints = proxmox.check_relevant_nodes(module.params["node"]) + try: + result = proxmox.backup_create( + module.params, module.check_mode, node_endpoints) + except Exception as e: + module.fail_json( + msg="Creating backups failed with exception: %s" % to_native(e)) + if module.check_mode: + module.exit_json(backups=result, changed=True, + msg="Backups would be created") + elif len([entry for entry in result if entry["upid"] == "OK"]) == len(result): + module.exit_json( + backups=result, + changed=False, + 
msg="Backup request sent to proxmox, no tasks created") + elif module.params["wait"]: + module.exit_json(backups=result, changed=True, msg="Backups succeeded") + else: + module.exit_json(backups=result, changed=True, + msg="Backup tasks created") + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/test_proxmox_backup.py b/tests/unit/plugins/modules/test_proxmox_backup.py new file mode 100644 index 0000000000..8da4de4dee --- /dev/null +++ b/tests/unit/plugins/modules/test_proxmox_backup.py @@ -0,0 +1,366 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import (absolute_import, division, print_function) +import \ + ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils +from ansible_collections.community.general.plugins.modules import proxmox_backup +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, AnsibleFailJson, set_module_args, ModuleTestCase) +from ansible_collections.community.general.tests.unit.compat.mock import patch + +__metaclass__ = type + +import pytest + +proxmoxer = pytest.importorskip('proxmoxer') + + +MINIMAL_PERMISSIONS = { + '/sdn/zones': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, + '/nodes': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, + '/sdn': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, + '/vms': {'VM.Audit': 1, + 'Sys.Audit': 1, + 'Mapping.Audit': 1, + 'VM.Backup': 1, + 'Datastore.Audit': 1, + 'SDN.Audit': 1, + 'Pool.Audit': 1}, + '/': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1}, + '/storage/local-zfs': {'Datastore.AllocateSpace': 1, + 'Datastore.Audit': 1}, + '/storage': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, + '/access': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, + 
'/vms/101': {'VM.Backup': 1, + 'Mapping.Audit': 1, + 'Datastore.AllocateSpace': 0, + 'Sys.Audit': 1, + 'VM.Audit': 1, + 'SDN.Audit': 1, + 'Pool.Audit': 1, + 'Datastore.Audit': 1}, + '/vms/100': {'VM.Backup': 1, + 'Mapping.Audit': 1, + 'Datastore.AllocateSpace': 0, + 'Sys.Audit': 1, + 'VM.Audit': 1, + 'SDN.Audit': 1, + 'Pool.Audit': 1, + 'Datastore.Audit': 1}, + '/pool': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1}, } + +STORAGE = [{'type': 'pbs', + 'username': 'test@pbs', + 'datastore': 'Backup-Pool', + 'server': '10.0.0.1', + 'shared': 1, + 'fingerprint': '94:fd:ac:e7:d5:36:0e:11:5b:23:05:40:d2:a4:e1:8a:c1:52:41:01:07:28:c0:4d:c5:ee:df:7f:7c:03:ab:41', + 'prune-backups': 'keep-all=1', + 'storage': 'backup', + 'content': 'backup', + 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681'}, + {'nodes': 'node1,node2,node3', + 'sparse': 1, + 'type': 'zfspool', + 'content': 'rootdir,images', + 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681', + 'pool': 'rpool/data', + 'storage': 'local-zfs'}] + + +VMS = [{"diskwrite": 0, + "vmid": 100, + "node": "node1", + "id": "lxc/100", + "maxdisk": 10000, + "template": 0, + "disk": 10000, + "uptime": 10000, + "maxmem": 10000, + "maxcpu": 1, + "netin": 10000, + "type": "lxc", + "netout": 10000, + "mem": 10000, + "diskread": 10000, + "cpu": 0.01, + "name": "test-lxc", + "status": "running"}, + {"diskwrite": 0, + "vmid": 101, + "node": "node2", + "id": "kvm/101", + "maxdisk": 10000, + "template": 0, + "disk": 10000, + "uptime": 10000, + "maxmem": 10000, + "maxcpu": 1, + "netin": 10000, + "type": "lxc", + "netout": 10000, + "mem": 10000, + "diskread": 10000, + "cpu": 0.01, + "name": "test-kvm", + "status": "running"} + ] + +NODES = [{'level': '', + 'type': 'node', + 'node': 'node1', + 'status': 'online', + 'id': 'node/node1', + 'cgroup-mode': 2}, + {'status': 'online', + 'id': 'node/node2', + 'cgroup-mode': 2, + 'level': '', + 'node': 'node2', + 'type': 'node'}, + {'status': 'online', + 'id': 'node/node3', + 'cgroup-mode': 2, 
+ 'level': '', + 'node': 'node3', + 'type': 'node'}, + ] + +TASK_API_RETURN = { + "node1": { + 'starttime': 1732606253, + 'status': 'stopped', + 'type': 'vzdump', + 'pstart': 517463911, + 'upid': 'UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:', + 'id': '100', + 'node': 'hypervisor', + 'pid': 541669, + 'user': 'test@pve', + 'exitstatus': 'OK'}, + "node2": { + 'starttime': 1732606253, + 'status': 'stopped', + 'type': 'vzdump', + 'pstart': 517463911, + 'upid': 'UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:', + 'id': '101', + 'node': 'hypervisor', + 'pid': 541669, + 'user': 'test@pve', + 'exitstatus': 'OK'}, +} + + +VZDUMP_API_RETURN = { + "node1": "UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:", + "node2": "UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:", + "node3": "OK", +} + + +TASKLOG_API_RETURN = {"node1": [{'n': 1, + 't': "INFO: starting new backup job: vzdump 100 --mode snapshot --node node1 " + "--notes-template '{{guestname}}' --storage backup --notification-mode auto"}, + {'t': 'INFO: Starting Backup of VM 100 (lxc)', + 'n': 2}, + {'n': 23, 't': 'INFO: adding notes to backup'}, + {'n': 24, + 't': 'INFO: Finished Backup of VM 100 (00:00:03)'}, + {'n': 25, + 't': 'INFO: Backup finished at 2024-11-25 16:28:03'}, + {'t': 'INFO: Backup job finished successfully', + 'n': 26}, + {'n': 27, 't': 'TASK OK'}], + "node2": [{'n': 1, + 't': "INFO: starting new backup job: vzdump 101 --mode snapshot --node node2 " + "--notes-template '{{guestname}}' --storage backup --notification-mode auto"}, + {'t': 'INFO: Starting Backup of VM 101 (kvm)', + 'n': 2}, + {'n': 24, + 't': 'INFO: Finished Backup of VM 100 (00:00:03)'}, + {'n': 25, + 't': 'INFO: Backup finished at 2024-11-25 16:28:03'}, + {'t': 'INFO: Backup job finished successfully', + 'n': 26}, + {'n': 27, 't': 'TASK OK'}], + } + + +def return_valid_resources(resource_type, *args, **kwargs): + if resource_type == "vm": + return VMS + if resource_type == "node": + return 
NODES + + +def return_vzdump_api(node, *args, **kwargs): + if node in ("node1", "node2", "node3"): + return VZDUMP_API_RETURN[node] + + +def return_logs_api(node, *args, **kwargs): + if node in ("node1", "node2"): + return TASKLOG_API_RETURN[node] + + +def return_task_status_api(node, *args, **kwargs): + if node in ("node1", "node2"): + return TASK_API_RETURN[node] + + +class TestProxmoxBackup(ModuleTestCase): + def setUp(self): + super(TestProxmoxBackup, self).setUp() + proxmox_utils.HAS_PROXMOXER = True + self.module = proxmox_backup + self.connect_mock = patch( + "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect", + ).start() + self.mock_get_permissions = patch.object( + proxmox_backup.ProxmoxBackupAnsible, "_get_permissions").start() + self.mock_get_storages = patch.object(proxmox_utils.ProxmoxAnsible, + "get_storages").start() + self.mock_get_resources = patch.object( + proxmox_backup.ProxmoxBackupAnsible, "_get_resources").start() + self.mock_get_tasklog = patch.object( + proxmox_backup.ProxmoxBackupAnsible, "_get_tasklog").start() + self.mock_post_vzdump = patch.object( + proxmox_backup.ProxmoxBackupAnsible, "_post_vzdump").start() + self.mock_get_taskok = patch.object( + proxmox_backup.ProxmoxBackupAnsible, "_get_taskok").start() + self.mock_get_permissions.return_value = MINIMAL_PERMISSIONS + self.mock_get_storages.return_value = STORAGE + self.mock_get_resources.side_effect = return_valid_resources + self.mock_get_taskok.side_effect = return_task_status_api + self.mock_get_tasklog.side_effect = return_logs_api + self.mock_post_vzdump.side_effect = return_vzdump_api + + def tearDown(self): + self.connect_mock.stop() + self.mock_get_permissions.stop() + self.mock_get_storages.stop() + self.mock_get_resources.stop() + super(TestProxmoxBackup, self).tearDown() + + def test_proxmox_backup_without_argument(self): + set_module_args({}) + with pytest.raises(AnsibleFailJson): + proxmox_backup.main() + + def 
test_create_backup_check_mode(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + "api_host": "127.0.0.1", + "mode": "all", + "storage": "backup", + "_ansible_check_mode": True, + }) + with pytest.raises(AnsibleExitJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + + assert result["changed"] is True + assert result["msg"] == "Backups would be created" + assert len(result["backups"]) == 0 + assert self.mock_get_taskok.call_count == 0 + assert self.mock_get_tasklog.call_count == 0 + assert self.mock_post_vzdump.call_count == 0 + + def test_create_backup_all_mode(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + "api_host": "127.0.0.1", + "mode": "all", + "storage": "backup", + }) + with pytest.raises(AnsibleExitJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + assert result["changed"] is True + assert result["msg"] == "Backup tasks created" + for backup_result in result["backups"]: + assert backup_result["upid"] in { + VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN} + assert self.mock_get_taskok.call_count == 0 + assert self.mock_post_vzdump.call_count == 3 + + def test_create_backup_include_mode_with_wait(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + "api_host": "127.0.0.1", + "mode": "include", + "node": "node1", + "storage": "backup", + "vmids": [100], + "wait": True + }) + with pytest.raises(AnsibleExitJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + assert result["changed"] is True + assert result["msg"] == "Backups succeeded" + for backup_result in result["backups"]: + assert backup_result["upid"] in { + VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN} + assert self.mock_get_taskok.call_count == 1 + assert self.mock_post_vzdump.call_count == 1 + + def test_fail_insufficient_permissions(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + 
"api_host": "127.0.0.1", + "mode": "include", + "storage": "backup", + "performance_tweaks": "max-workers=2", + "vmids": [100], + "wait": True + }) + with pytest.raises(AnsibleFailJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + assert result["msg"] == "Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'" + assert self.mock_get_taskok.call_count == 0 + assert self.mock_post_vzdump.call_count == 0 + + def test_fail_missing_node(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + "api_host": "127.0.0.1", + "mode": "include", + "storage": "backup", + "node": "nonexistingnode", + "vmids": [100], + "wait": True + }) + with pytest.raises(AnsibleFailJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + assert result["msg"] == "Node nonexistingnode was specified, but does not exist on the cluster" + assert self.mock_get_taskok.call_count == 0 + assert self.mock_post_vzdump.call_count == 0 + + def test_fail_missing_storage(self): + set_module_args({"api_user": "root@pam", + "api_password": "secret", + "api_host": "127.0.0.1", + "mode": "include", + "storage": "nonexistingstorage", + "vmids": [100], + "wait": True + }) + with pytest.raises(AnsibleFailJson) as exc_info: + proxmox_backup.main() + + result = exc_info.value.args[0] + assert result["msg"] == "Storage nonexistingstorage does not exist in the cluster" + assert self.mock_get_taskok.call_count == 0 + assert self.mock_post_vzdump.call_count == 0 From f2dbe08d0e86dcbcb78bba52d9d19d5094089cbb Mon Sep 17 00:00:00 2001 From: Matthieu Bourgain Date: Mon, 2 Dec 2024 20:11:00 +0100 Subject: [PATCH 354/482] Fail if Slack API response is not OK with error message (#9198) * Fails if slack api return is not ok * add changelog * show all error * add doc * Update plugins/modules/slack.py Co-authored-by: Felix Fontein * Update 
changelogs/fragments/9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- ...il-if-slack-api-response-is-not-ok-with-error-message.yml | 2 ++ plugins/modules/slack.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml diff --git a/changelogs/fragments/9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml b/changelogs/fragments/9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml new file mode 100644 index 0000000000..56ab25f578 --- /dev/null +++ b/changelogs/fragments/9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml @@ -0,0 +1,2 @@ +bugfixes: + - slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198). diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py index 41dd4f5dba..58893b0f42 100644 --- a/plugins/modules/slack.py +++ b/plugins/modules/slack.py @@ -76,7 +76,8 @@ options: message_id: description: - Optional. Message ID to edit, instead of posting a new message. - - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel_id }}) to get RV(ignore:channel_id) from previous task run. + - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) from previous task run. + - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)). - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). 
type: str version_added: 1.2.0 @@ -391,6 +392,8 @@ def get_slack_message(module, token, channel, ts): if info['status'] != 200: module.fail_json(msg="failed to get slack message") data = module.from_json(response.read()) + if data.get('ok') is False: + module.fail_json(msg="failed to get slack message: %s" % data) if len(data['messages']) < 1: module.fail_json(msg="no messages matching ts: %s" % ts) if len(data['messages']) > 1: From 41b6a281e17572f886cbae6c585a1541a62657f1 Mon Sep 17 00:00:00 2001 From: Stanislav Shamilov Date: Mon, 2 Dec 2024 21:16:00 +0200 Subject: [PATCH 355/482] Add decompress module (#9175) * adds simple implementation of `decompress` module * adds simple test, fixes src and dest arg types * minor refactoring * adds support for common file operations adds integration test for gz decompressing * makes tests parametrized to test all supported compression formats * checks that target file exists * writes to decompressed file now uses atomic_move * adds idempotency for decompression * refactoring, removed classes * adds support for check mode * adds check for destination file. If it exists and it is a directory, the module returns error * refactoring, moves code to a class. Also, simplifies tests (now only tests related to the module core functionality run as parametrized, tests for idempotency and check mode run only for one format) * adds 'remove' parameter that deletes original compressed file after decompression * adds documentation * fixes bug with 'remove' parameter in check mode * makes dest argument not required. Dest filename now can be produced from the src filename * adds dest to output * updates the documentation, adds "RETURN" block * fixes test * adds support for python2 * removes some of the test files that can be generated during testing. 
Adds copyright header to test files * adds maintainer * apply minor suggestions from code review Co-authored-by: Felix Fontein * fixes code review comments (idempotency issue with non existing src, existing dest and remove=true; fixes the issue and adds test) * refactors the module to use ModuleHelper * refactors lzma dependency manual check to use 'deps.validate' * minor fix * removes registered handlers check * minor refactoring * adds aliases * changes setup for tests * tests: ignores macos and fixes tests for FreeBSD * tests: reverts ignore for macos and fixes issue with centos7 * tests: adds liblzma dependency for python2 * tests: adds backports.lzma * fixes bz2 decompression for python2 * tests: install xz for osx * tests: install xz for osx (2) * fixes code review comments --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/decompress.py | 213 ++++++++++++++++++ tests/integration/targets/decompress/aliases | 7 + .../targets/decompress/files/file.txt | 5 + .../targets/decompress/files/second_file.txt | 5 + .../targets/decompress/handlers/main.yml | 9 + .../targets/decompress/meta/main.yml | 7 + .../targets/decompress/tasks/cleanup.yml | 12 + .../targets/decompress/tasks/core.yml | 29 +++ .../targets/decompress/tasks/dest.yml | 51 +++++ .../targets/decompress/tasks/main.yml | 115 ++++++++++ .../targets/decompress/tasks/misc.yml | 74 ++++++ 12 files changed, 529 insertions(+) create mode 100644 plugins/modules/decompress.py create mode 100644 tests/integration/targets/decompress/aliases create mode 100644 tests/integration/targets/decompress/files/file.txt create mode 100644 tests/integration/targets/decompress/files/second_file.txt create mode 100644 tests/integration/targets/decompress/handlers/main.yml create mode 100644 tests/integration/targets/decompress/meta/main.yml create mode 100644 tests/integration/targets/decompress/tasks/cleanup.yml create mode 100644 tests/integration/targets/decompress/tasks/core.yml create mode 
100644 tests/integration/targets/decompress/tasks/dest.yml create mode 100644 tests/integration/targets/decompress/tasks/main.yml create mode 100644 tests/integration/targets/decompress/tasks/misc.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 594f01349a..9650fd0ef3 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -506,6 +506,8 @@ files: ignore: skornehl $modules/dconf.py: maintainers: azaghal + $modules/decompress.py: + maintainers: shamilovstas $modules/deploy_helper.py: maintainers: ramondelafuente $modules/dimensiondata_network.py: diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py new file mode 100644 index 0000000000..818213fb0d --- /dev/null +++ b/plugins/modules/decompress.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: decompress +short_description: Decompresses compressed files +version_added: 10.1.0 +description: + - Decompresses compressed files. + - The source (compressed) file and destination (decompressed) files are on the remote host. + - Source file can be deleted after decompression. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + src: + description: + - Remote absolute path for the file to decompress. + type: path + required: true + dest: + description: + - The file name of the destination file where the compressed file will be decompressed. + - If the destination file exists, it will be truncated and overwritten. 
+ - If not specified, the destination filename will be derived from O(src) by removing the compression format + extension. For example, if O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) will be + V(/path/to/file.txt). If the O(src) file does not have an extension for the current O(format), the O(dest) + filename will be made by appending C(_decompressed) to the O(src) filename. For instance, if O(src) is + V(/path/to/file.myextension), the (dest) filename will be V(/path/to/file.myextension_decompressed). + type: path + format: + description: + - The type of compression to use to decompress. + type: str + choices: [ gz, bz2, xz ] + default: gz + remove: + description: + - Remove original compressed file after decompression. + type: bool + default: false +requirements: + - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format. +author: + - Stanislav Shamilov (@shamilovstas) +''' + +EXAMPLES = r''' +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt (gz compression is used by default) + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt + community.general.decompress: + src: /path/to/file.txt.gz + +- name: Decompress file compressed with bzip2 + community.general.decompress: + src: /path/to/file.txt.bz2 + dest: /path/to/file.bz2 + format: bz2 + +- name: Decompress file and delete the compressed file afterwards + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + remove: true +''' + +RETURN = r''' +dest: + description: Path to decompressed file + type: str + returned: success + sample: /path/to/file.txt +''' + +import bz2 +import filecmp +import gzip +import os +import shutil +import tempfile + +from ansible.module_utils import six +from 
ansible_collections.community.general.plugins.module_utils.mh.module_helper import ModuleHelper +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("lzma"): + if six.PY3: + import lzma + else: + from backports import lzma + + +def lzma_decompress(src): + return lzma.open(src, "rb") + + +def bz2_decompress(src): + if six.PY3: + return bz2.open(src, "rb") + else: + return bz2.BZ2File(src, "rb") + + +def gzip_decompress(src): + return gzip.open(src, "rb") + + +def decompress(b_src, b_dest, handler): + with handler(b_src) as src_file: + with open(b_dest, "wb") as dest_file: + shutil.copyfileobj(src_file, dest_file) + + +class Decompress(ModuleHelper): + destination_filename_template = "%s_decompressed" + use_old_vardict = False + output_params = 'dest' + + module = dict( + argument_spec=dict( + src=dict(type='path', required=True), + dest=dict(type='path'), + format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']), + remove=dict(type='bool', default=False) + ), + add_file_common_args=True, + supports_check_mode=True + ) + + def __init_module__(self): + self.handlers = {"gz": gzip_decompress, "bz2": bz2_decompress, "xz": lzma_decompress} + if self.vars.dest is None: + self.vars.dest = self.get_destination_filename() + deps.validate(self.module) + self.configure() + + def configure(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + if not os.path.exists(b_src): + if self.vars.remove and os.path.exists(b_dest): + self.module.exit_json(changed=False) + else: + self.do_raise(msg="Path does not exist: '%s'" % b_src) + if os.path.isdir(b_src): + self.do_raise(msg="Cannot decompress directory '%s'" % b_src) + if os.path.isdir(b_dest): + self.do_raise(msg="Destination is a directory, cannot decompress: '%s'" % b_dest) + + def __run__(self): + b_dest = 
to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + + file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest) + handler = self.handlers[self.vars.format] + try: + tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir) + self.module.add_cleanup_file(temppath) + b_temppath = to_bytes(temppath, errors='surrogate_or_strict') + decompress(b_src, b_temppath, handler) + except OSError as e: + self.do_raise(msg="Unable to create temporary file '%s'" % to_native(e)) + + if os.path.exists(b_dest): + self.changed = not filecmp.cmp(b_temppath, b_dest, shallow=False) + else: + self.changed = True + + if self.changed and not self.module.check_mode: + try: + self.module.atomic_move(b_temppath, b_dest) + except OSError: + self.do_raise(msg="Unable to move temporary file '%s' to '%s'" % (b_temppath, self.vars.dest)) + + if self.vars.remove and not self.check_mode: + os.remove(b_src) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + def get_destination_filename(self): + src = self.vars.src + fmt_extension = ".%s" % self.vars.format + if src.endswith(fmt_extension) and len(src) > len(fmt_extension): + filename = src[:-len(fmt_extension)] + else: + filename = Decompress.destination_filename_template % src + return filename + + +def main(): + Decompress.execute() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/decompress/aliases b/tests/integration/targets/decompress/aliases new file mode 100644 index 0000000000..f4049c7dc2 --- /dev/null +++ b/tests/integration/targets/decompress/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 +needs/root +destructive \ No newline at end of file diff --git 
a/tests/integration/targets/decompress/files/file.txt b/tests/integration/targets/decompress/files/file.txt new file mode 100644 index 0000000000..5d2e0d1458 --- /dev/null +++ b/tests/integration/targets/decompress/files/file.txt @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +This is sample file \ No newline at end of file diff --git a/tests/integration/targets/decompress/files/second_file.txt b/tests/integration/targets/decompress/files/second_file.txt new file mode 100644 index 0000000000..bd04eca21c --- /dev/null +++ b/tests/integration/targets/decompress/files/second_file.txt @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +Content of this file must differ from the 'file.txt' \ No newline at end of file diff --git a/tests/integration/targets/decompress/handlers/main.yml b/tests/integration/targets/decompress/handlers/main.yml new file mode 100644 index 0000000000..8c92cc4f81 --- /dev/null +++ b/tests/integration/targets/decompress/handlers/main.yml @@ -0,0 +1,9 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: delete backports.lzma + pip: + name: backports.lzma + state: absent diff --git a/tests/integration/targets/decompress/meta/main.yml b/tests/integration/targets/decompress/meta/main.yml new file mode 100644 index 0000000000..982de6eb03 --- /dev/null +++ b/tests/integration/targets/decompress/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 
+# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/decompress/tasks/cleanup.yml b/tests/integration/targets/decompress/tasks/cleanup.yml new file mode 100644 index 0000000000..95db42104f --- /dev/null +++ b/tests/integration/targets/decompress/tasks/cleanup.yml @@ -0,0 +1,12 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Delete decompressed files + file: + path: "{{ remote_tmp_dir }}/file_from_{{ format }}.txt" + state: absent + loop: "{{ formats }}" + loop_control: + loop_var: format \ No newline at end of file diff --git a/tests/integration/targets/decompress/tasks/core.yml b/tests/integration/targets/decompress/tasks/core.yml new file mode 100644 index 0000000000..a92ae21b78 --- /dev/null +++ b/tests/integration/targets/decompress/tasks/core.yml @@ -0,0 +1,29 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set mode for decompressed file ({{ format }} test) + set_fact: + decompressed_mode: "0640" + +- name: Simple decompress ({{ format }} test) + decompress: + src: "{{ remote_tmp_dir }}/file.txt.{{ format }}" + dest: "{{ remote_tmp_dir }}/file_from_{{ format }}.txt" + format: "{{ format }}" + mode: "{{ decompressed_mode }}" + register: first_decompression + +- name: Stat decompressed file ({{ format }} test) + stat: + path: "{{ remote_tmp_dir }}/file_from_{{ format }}.txt" + register: decompressed_file_stat + +- name: Check that file was decompressed correctly ({{ format }} test) + assert: + that: + - first_decompression.changed + - decompressed_file_stat.stat.exists + - decompressed_file_stat.stat.mode == decompressed_mode + - orig_file_stat.stat.checksum == 
decompressed_file_stat.stat.checksum diff --git a/tests/integration/targets/decompress/tasks/dest.yml b/tests/integration/targets/decompress/tasks/dest.yml new file mode 100644 index 0000000000..9a7bbe499f --- /dev/null +++ b/tests/integration/targets/decompress/tasks/dest.yml @@ -0,0 +1,51 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Copy a compressed file + copy: + src: "{{ item.orig }}" + dest: "{{ item.new }}" + remote_src: true + loop: + - { orig: "{{ remote_tmp_dir }}/file.txt.gz", new: "{{ remote_tmp_dir }}/dest.txt.gz" } + - { orig: "{{ remote_tmp_dir }}/file.txt.gz", new: "{{ remote_tmp_dir }}/dest" } + +- name: Decompress a file without specifying destination + decompress: + src: "{{ remote_tmp_dir }}/dest.txt.gz" + remove: true + +- name: Decompress a file which lacks extension without specifying destination + decompress: + src: "{{ remote_tmp_dir }}/dest" + remove: true + +- name: Stat result files + stat: + path: "{{ remote_tmp_dir }}/{{ filename }}" + loop: + - dest.txt + - dest_decompressed + loop_control: + loop_var: filename + register: result_files_stat + +- name: Test that file exists + assert: + that: "{{ item.stat.exists }}" + quiet: true + loop: "{{ result_files_stat.results }}" + loop_control: + label: "{{ item.stat.path }}" + +- name: Delete test files + file: + path: "{{ filename }}" + state: absent + loop: + - "dest.txt" + - "dest_decompressed" + loop_control: + loop_var: filename diff --git a/tests/integration/targets/decompress/tasks/main.yml b/tests/integration/targets/decompress/tasks/main.yml new file mode 100644 index 0000000000..f14f2d5593 --- /dev/null +++ b/tests/integration/targets/decompress/tasks/main.yml @@ -0,0 +1,115 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Copy test files + copy: + src: "files/" + dest: "{{ remote_tmp_dir }}" + +- name: Get original file stat + stat: + path: "{{ remote_tmp_dir }}/file.txt" + register: orig_file_stat + +- name: Set supported formats + set_fact: + formats: + - bz2 + - gz + - xz + +- name: Ensure xz is present to create compressed files (not Debian) + package: + name: + - xz + - bzip2 + state: latest + when: + - ansible_system != 'FreeBSD' + - ansible_os_family != 'Darwin' + - ansible_os_family != 'Debian' + +- name: Ensure xz is present to create compressed files (Debian) + package: + name: xz-utils + state: latest + when: ansible_os_family == 'Debian' + +- name: Install prerequisites for backports.lzma when using python2 (non OSX) + block: + - name: Set liblzma package name depending on the OS + set_fact: + liblzma_dev_package: + Debian: liblzma-dev + RedHat: xz-devel + Suse: xz-devel + - name: Ensure liblzma-dev is present to install backports-lzma + package: + name: "{{ liblzma_dev_package[ansible_os_family] }}" + state: latest + when: ansible_os_family in liblzma_dev_package.keys() + when: + - ansible_python_version.split('.')[0] == '2' + - ansible_os_family != 'Darwin' + +- name: Install prerequisites for backports.lzma when using python2 (OSX) + block: + - name: Find brew binary + command: which brew + register: brew_which + - name: Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + - name: "Install package" + homebrew: + name: xz + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + # Newer versions of brew want to compile a package which takes a long time. 
Do not upgrade homebrew until a + # proper solution can be found + environment: + HOMEBREW_NO_AUTO_UPDATE: "True" + when: + - ansible_os_family == 'Darwin' + +- name: Ensure backports.lzma is present to create test archive (pip) + pip: + name: backports.lzma + state: latest + when: ansible_python_version.split('.')[0] == '2' + notify: + - delete backports.lzma + +- name: Generate compressed files + shell: | + gzip < {{ item }} > {{ item }}.gz + bzip2 < {{ item }} > {{ item }}.bz2 + xz < {{ item }} > {{ item }}.xz + loop: + - "{{ remote_tmp_dir }}/file.txt" + - "{{ remote_tmp_dir }}/second_file.txt" + +# Run tests +- name: Run core tests + block: + - include_tasks: core.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + - import_tasks: cleanup.yml + + +- name: Run idempotency and check mode tests + block: + - import_tasks: misc.yml + - import_tasks: cleanup.yml + +- name: Run tests for destination file + block: + - import_tasks: dest.yml + - import_tasks: cleanup.yml diff --git a/tests/integration/targets/decompress/tasks/misc.yml b/tests/integration/targets/decompress/tasks/misc.yml new file mode 100644 index 0000000000..1514e55030 --- /dev/null +++ b/tests/integration/targets/decompress/tasks/misc.yml @@ -0,0 +1,74 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Decompress with check mode enabled + decompress: + src: "{{ remote_tmp_dir }}/file.txt.gz" + dest: "{{ remote_tmp_dir }}/file_from_gz.txt" + format: gz + check_mode: true + register: decompressed_check_mode + +- name: Decompress second time with check mode enabled + decompress: + src: "{{ remote_tmp_dir }}/file.txt.gz" + dest: "{{ remote_tmp_dir }}/file_from_gz.txt" + format: gz + remove: true + check_mode: true + register: decompressed_check_mode_2 + +- name: Stat original compressed file + stat: + path: "{{ remote_tmp_dir 
}}/file.txt.gz" + register: original_file + +- name: Stat non-existing file + stat: + path: "{{ remote_tmp_dir }}/file_from_gz.txt" + register: nonexisting_stat + +- name: Check mode test + assert: + that: + - decompressed_check_mode.changed + - decompressed_check_mode_2.changed + - original_file.stat.exists + - not nonexisting_stat.stat.exists + +- name: Copy compressed file + copy: + src: "{{ remote_tmp_dir }}/file.txt.gz" + dest: "{{ remote_tmp_dir }}/file_copied.txt.gz" + remote_src: true + +- name: Decompress, deleting original file + decompress: + src: "{{ remote_tmp_dir }}/file_copied.txt.gz" + dest: "{{ remote_tmp_dir }}/file_copied.txt" + remove: true + +- name: Decompress non existing src + decompress: + src: "{{ remote_tmp_dir }}/file_copied.txt.gz" + dest: "{{ remote_tmp_dir }}/file_copied.txt" + remove: true + register: decompress_non_existing_src + +- name: Stat compressed file + stat: + path: "{{ remote_tmp_dir }}/file_copied.txt.gz" + register: compressed_stat + +- name: Run tests + assert: + that: + - not compressed_stat.stat.exists + - not decompress_non_existing_src.changed + +- name: Delete decompressed file + file: + path: "{{ remote_tmp_dir }}/file_copied.txt" + state: absent From da97e220ef3e859230c35f0fe8802245cb21f40b Mon Sep 17 00:00:00 2001 From: jurelou Date: Mon, 2 Dec 2024 20:16:30 +0100 Subject: [PATCH 356/482] iso_extract: Add password argument (#9159) * iso_extract: Add password argument * Update iso_extract.py * Update iso_extract.py * Update plugins/modules/iso_extract.py Co-authored-by: Felix Fontein * Create 9159-iso-extract_add_password.yml * Update 9159-iso-extract_add_password.yml * Remove default value for password * Use password with 7z only * Apply suggestions from code review Co-authored-by: Felix Fontein * Fix indentation * Update plugins/modules/iso_extract.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * iso_extract: add password warning * Update plugins/modules/iso_extract.py 
Co-authored-by: Felix Fontein * Shorten a docs line. * Fix formatting. --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .../fragments/9159-iso-extract_add_password.yml | 2 ++ plugins/modules/iso_extract.py | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9159-iso-extract_add_password.yml diff --git a/changelogs/fragments/9159-iso-extract_add_password.yml b/changelogs/fragments/9159-iso-extract_add_password.yml new file mode 100644 index 0000000000..f1b2650d4f --- /dev/null +++ b/changelogs/fragments/9159-iso-extract_add_password.yml @@ -0,0 +1,2 @@ +minor_changes: + - iso_extract - adds ``password`` parameter that is passed to 7z (https://github.com/ansible-collections/community.general/pull/9159). diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py index 087ef2843f..9ef046ede4 100644 --- a/plugins/modules/iso_extract.py +++ b/plugins/modules/iso_extract.py @@ -67,6 +67,15 @@ options: - The path to the C(7z) executable to use for extracting files from the ISO. - If not provided, it will assume the value V(7z). type: path + password: + description: + - Password used to decrypt files from the ISO. + - Will only be used if 7z is used. + - The password is used as a command line argument to 7z. This is a B(potential security risk) that + allows passwords to be revealed if someone else can list running processes on the same machine + in the right moment. + type: str + version_added: 10.1.0 notes: - Only the file checksum (content) is taken into account when extracting files from the ISO image. If O(force=false), only checks the presence of the file. 
@@ -100,6 +109,7 @@ def main(): dest=dict(type='path', required=True), files=dict(type='list', elements='str', required=True), force=dict(type='bool', default=True), + password=dict(type='str', no_log=True), executable=dict(type='path'), # No default on purpose ), supports_check_mode=True, @@ -108,6 +118,7 @@ def main(): dest = module.params['dest'] files = module.params['files'] force = module.params['force'] + password = module.params['password'] executable = module.params['executable'] result = dict( @@ -154,7 +165,10 @@ def main(): # Use 7zip when we have a binary, otherwise try to mount if binary: - cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files + cmd = [binary, 'x', image, '-o%s' % tmp_dir] + if password: + cmd += ["-p%s" % password] + cmd += extract_files else: cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] From fddccea9403d76fc514bba3f87c1eedc1c0c8b8e Mon Sep 17 00:00:00 2001 From: Thibaut Decombe <68703331+UnknownPlatypus@users.noreply.github.com> Date: Mon, 2 Dec 2024 20:17:04 +0100 Subject: [PATCH 357/482] Greatly speed up homebrew module when multiple packages are passed in the `name` key (#9181) * Increase test coverage and assert output more strictly * Remove unused `_current_package_is_installed_from_head` * Remove `un/changed_count` and infer from un/changed_pkgs length * Track `installed` & `outdated` package state once * Validate package names beforehand * Install packages in 1 brew call instead of N This also has the side effect of fixing the check message so that it prints every packages that will be installed instead of only the first one. * Uninstall packages in 1 brew call instead of N * Link packages in 1 brew call instead of N * Unlink packages in 1 brew call instead of N * Upgrade packages in 1 brew call instead of N * Remove dangling checks * Remove `_status` method and directly return the tuple * Add changelog fragment * Fix invalid format string (nice catch pylint!) 
* Update changelogs/fragments/9181-improve-homebrew-module-performance.yml Co-authored-by: Felix Fontein * Update brew info parsing for casks * Update changelogs/fragments/9181-improve-homebrew-module-performance.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- ...81-improve-homebrew-module-performance.yml | 2 + plugins/modules/homebrew.py | 372 +++++++++--------- .../targets/homebrew/tasks/formulae.yml | 226 +++++++++++ 3 files changed, 409 insertions(+), 191 deletions(-) create mode 100644 changelogs/fragments/9181-improve-homebrew-module-performance.yml diff --git a/changelogs/fragments/9181-improve-homebrew-module-performance.yml b/changelogs/fragments/9181-improve-homebrew-module-performance.yml new file mode 100644 index 0000000000..b3b6ba2ca4 --- /dev/null +++ b/changelogs/fragments/9181-improve-homebrew-module-performance.yml @@ -0,0 +1,2 @@ +minor_changes: + - homebrew - greatly speed up module when multiple packages are passed in the ``name`` option (https://github.com/ansible-collections/community.general/pull/9181). 
\ No newline at end of file diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index bc5d8649e7..8eb1b9d689 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -308,21 +308,6 @@ class Homebrew(object): self._params = self.module.params return self._params - @property - def current_package(self): - return self._current_package - - @current_package.setter - def current_package(self, package): - if not HomebrewValidate.valid_package(package): - self._current_package = None - self.failed = True - self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package # /class properties -------------------------------------------- }}} def __init__(self, module, path, packages=None, state=None, @@ -347,13 +332,13 @@ class Homebrew(object): def _setup_status_vars(self): self.failed = False self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 self.changed_pkgs = [] self.unchanged_pkgs = [] self.message = '' def _setup_instance_vars(self, **kwargs): + self.installed_packages = set() + self.outdated_packages = set() for key, val in iteritems(kwargs): setattr(self, key, val) @@ -380,8 +365,48 @@ class Homebrew(object): return self.brew_path - def _status(self): - return (self.failed, self.changed, self.message) + def _validate_packages_names(self): + invalid_packages = [] + for package in self.packages: + if not HomebrewValidate.valid_package(package): + invalid_packages.append(package) + + if invalid_packages: + self.failed = True + self.message = 'Invalid package{0}: {1}'.format( + "s" if len(invalid_packages) > 1 else "", + ", ".join(invalid_packages), + ) + raise HomebrewException(self.message) + + def _get_packages_info(self): + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + "--json=v2", + ] + cmd.extend(self.packages) + if self.force_formula: + cmd.append("--formula") + + rc, out, err = 
self.module.run_command(cmd) + if rc != 0: + self.failed = True + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) + raise HomebrewException(self.message) + + data = json.loads(out) + for package_detail in data.get("formulae", []): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_detail["name"]) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_detail["name"]) + + for package_detail in data.get("casks", []): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_detail["token"]) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_detail["token"]) # /prep -------------------------------------------------------- }}} def run(self): @@ -390,60 +415,14 @@ class Homebrew(object): except HomebrewException: pass - if not self.failed and (self.changed_count + self.unchanged_count > 1): + changed_count = len(self.changed_pkgs) + unchanged_count = len(self.unchanged_pkgs) + if not self.failed and (changed_count + unchanged_count > 1): self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, + changed_count, + unchanged_count, ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - "--json=v2", - self.current_package, - ] - if self.force_formula: - cmd.append("--formula") - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.failed = True - self.message = err.strip() or ("Unknown failure with exit code %d" % rc) - raise HomebrewException(self.message) - data = json.loads(out) - - return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks") - - def _current_package_is_outdated(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 
'outdated', - self.current_package, - ]) - - return rc != 0 - - def _current_package_is_installed_from_head(self): - if not self._current_package_is_installed(): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' ')[-1] == 'HEAD' - # /checks ------------------------------------------------------ }}} + return (self.failed, self.changed, self.message) # commands ----------------------------------------------------- {{{ def _run(self): @@ -454,6 +433,8 @@ class Homebrew(object): self._upgrade_all() if self.packages: + self._validate_packages_names() + self._get_packages_info() if self.state == 'installed': return self._install_packages() elif self.state == 'upgraded': @@ -523,19 +504,22 @@ class Homebrew(object): # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ - def _install_current_package(self): - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already installed: {0}'.format( - self.current_package, + def _install_packages(self): + packages_to_install = set(self.packages) - self.installed_packages + + if len(packages_to_install) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already installed: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package + self.message = 'Package{0} would be installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) ) raise HomebrewException(self.message) @@ -552,72 +536,28 @@ class Homebrew(object): opts = ( [self.brew_path, 'install'] + 
self.install_options - + [self.current_package, head, formula] + + list(packages_to_install) + + [head, formula] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(packages_to_install) + self.unchanged_pkgs.extend(self.installed_packages) self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) + self.message = 'Package{0} installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True # /installed ----------------------------- }}} # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - current_package_is_installed = self._current_package_is_installed() - if not current_package_is_installed: - command = 'install' - - if current_package_is_installed and not self._current_package_is_outdated(): - self.message = 'Package is already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - 
self.message = err.strip() - raise HomebrewException(self.message) - def _upgrade_all_packages(self): opts = ( [self.brew_path, 'upgrade'] @@ -639,138 +579,188 @@ class Homebrew(object): if not self.packages: self._upgrade_all_packages() else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True + # There are 3 action possible here depending on installed and outdated states: + # - not installed -> 'install' + # - installed and outdated -> 'upgrade' + # - installed and NOT outdated -> Nothing to do! + packages_to_install = set(self.packages) - self.installed_packages + packages_to_upgrade = self.installed_packages & self.outdated_packages + packages_to_install_or_upgrade = packages_to_install | packages_to_upgrade + + if len(packages_to_install_or_upgrade) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already upgraded: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade) + ) + raise HomebrewException(self.message) + + for command, packages in [ + ("install", packages_to_install), + ("upgrade", packages_to_upgrade) + ]: + if not packages: + continue + + opts = ( + [self.brew_path, command] + + self.install_options + + list(packages) + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc != 0: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + self.changed_pkgs.extend(packages_to_install_or_upgrade) + self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade) + self.changed = True + self.message = 'Package{0} upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", 
".join(packages_to_install_or_upgrade), + ) # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, + def _uninstall_packages(self): + packages_to_uninstall = self.installed_packages & set(self.packages) + + if len(packages_to_uninstall) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already uninstalled: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package + self.message = 'Package{0} would be uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'uninstall', '--force'] + self.install_options - + [self.current_package] + + list(packages_to_uninstall) ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + if rc == 0: + self.changed_pkgs.extend(packages_to_uninstall) + self.unchanged_pkgs.extend(set(self.packages) - self.installed_packages) self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) + self.message = 'Package{0} uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return 
True # /uninstalled ----------------------------- }}} # linked --------------------------------- {{{ - def _link_current_package(self): - if not self._current_package_is_installed(): + def _link_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package + self.message = 'Package{0} would be linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'link'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - + self.message = 'Package{0} linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be linked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True # /linked -------------------------------- }}} # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self._current_package_is_installed(): 
+ def _unlink_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package + self.message = 'Package{0} would be unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'unlink'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - + self.message = 'Package{0} unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be unlinked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True # /unlinked ------------------------------ }}} # /commands ---------------------------------------------------- }}} diff --git a/tests/integration/targets/homebrew/tasks/formulae.yml b/tests/integration/targets/homebrew/tasks/formulae.yml index 1ca8d753e7..21276e3a2e 100644 --- a/tests/integration/targets/homebrew/tasks/formulae.yml +++ 
b/tests/integration/targets/homebrew/tasks/formulae.yml @@ -56,6 +56,9 @@ - assert: that: - package_result.changed + - "package_result.msg == 'Package installed: gnu-tar'" + - "package_result.changed_pkgs == ['gnu-tar']" + - "package_result.unchanged_pkgs == []" - name: Again install {{ package_name }} package using homebrew homebrew: @@ -69,6 +72,41 @@ - assert: that: - not package_result.changed + - "package_result.msg == 'Package already installed: gnu-tar'" + - "package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs == ['gnu-tar']" + + - name: Unlink {{ package_name }} package using homebrew + homebrew: + name: "{{ package_name }}" + state: unlinked + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Package unlinked: gnu-tar'" + - "package_result.changed_pkgs == ['gnu-tar']" + - "package_result.unchanged_pkgs == []" + + - name: Link {{ package_name }} package using homebrew + homebrew: + name: "{{ package_name }}" + state: linked + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Package linked: gnu-tar'" + - "package_result.changed_pkgs == ['gnu-tar']" + - "package_result.unchanged_pkgs == []" - name: Uninstall {{ package_name }} package using homebrew homebrew: @@ -82,6 +120,9 @@ - assert: that: - package_result.changed + - "package_result.msg == 'Package uninstalled: gnu-tar'" + - "package_result.changed_pkgs == ['gnu-tar']" + - "package_result.unchanged_pkgs == []" - name: Again uninstall {{ package_name }} package using homebrew homebrew: @@ -95,3 +136,188 @@ - assert: that: - not package_result.changed + - "package_result.msg == 'Package already uninstalled: gnu-tar'" + - "package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs == ['gnu-tar']" + + - 
name: Upgrade {{ package_name }} package using homebrew + homebrew: + name: "{{ package_name }}" + state: latest + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Package upgraded: gnu-tar'" + - "package_result.changed_pkgs == ['gnu-tar']" + - "package_result.unchanged_pkgs == []" + + - name: Again upgrade {{ package_name }} package using homebrew + homebrew: + name: "{{ package_name }}" + state: latest + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - not package_result.changed + - "package_result.msg == 'Package already upgraded: gnu-tar'" + - "package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs == ['gnu-tar']" + +- vars: + package_names: + - gnu-tar + - gnu-time + + block: + - name: Make sure {{ package_names }} packages are not installed + homebrew: + name: "{{ package_names }}" + state: absent + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + + - name: Install only {{ package_names[0] }} package using homebrew + homebrew: + name: "{{ package_names[0] }}" + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + + - name: Install {{ package_names }} packages using homebrew (one is already installed) + homebrew: + name: "{{ package_names }}" + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Changed: 1, Unchanged: 1'" + - "package_result.changed_pkgs == ['gnu-time']" + - "package_result.unchanged_pkgs == ['gnu-tar']" + + - name: Again install {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: present + update_homebrew: false + become: true 
+ become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - not package_result.changed + - "package_result.msg == 'Changed: 0, Unchanged: 2'" + - "package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" + + - name: Unlink {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: unlinked + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Changed: 2, Unchanged: 0'" + - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" + - "package_result.unchanged_pkgs == []" + + - name: Link {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: linked + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Changed: 2, Unchanged: 0'" + - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" + - "package_result.unchanged_pkgs == []" + + - name: Uninstall {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: absent + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Changed: 2, Unchanged: 0'" + - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" + - "package_result.unchanged_pkgs == []" + + - name: Again uninstall {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: absent + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - not package_result.changed + - "package_result.msg == 'Changed: 0, Unchanged: 2'" + - 
"package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" + + - name: Upgrade {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: latest + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - package_result.changed + - "package_result.msg == 'Changed: 2, Unchanged: 0'" + - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" + - "package_result.unchanged_pkgs == []" + + - name: Again upgrade {{ package_names }} packages using homebrew + homebrew: + name: "{{ package_names }}" + state: latest + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: package_result + + - assert: + that: + - not package_result.changed + - "package_result.msg == 'Changed: 0, Unchanged: 2'" + - "package_result.changed_pkgs == []" + - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" From ab0959480ec93be2490b1f7d277ad7da32841bef Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 3 Dec 2024 08:17:44 +1300 Subject: [PATCH 358/482] redfish_utils module utils: remove redundant code (#9190) deprecate method instead --- changelogs/fragments/9190-redfish-utils-unused-code.yml | 4 ++++ plugins/module_utils/redfish_utils.py | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/9190-redfish-utils-unused-code.yml diff --git a/changelogs/fragments/9190-redfish-utils-unused-code.yml b/changelogs/fragments/9190-redfish-utils-unused-code.yml new file mode 100644 index 0000000000..47f7588b96 --- /dev/null +++ b/changelogs/fragments/9190-redfish-utils-unused-code.yml @@ -0,0 +1,4 @@ +minor_changes: + - redfish_utils module utils - remove redundant code (https://github.com/ansible-collections/community.general/pull/9190). 
+deprecated_features: + - redfish_utils module utils - deprecate method ``RedfishUtils._init_session()`` (https://github.com/ansible-collections/community.general/pull/9190). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 388fc93669..dd559921ae 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -55,7 +55,6 @@ class RedfishUtils(object): self.strip_etag_quotes = strip_etag_quotes self.ciphers = ciphers self._vendor = None - self._init_session() def _auth_params(self, headers): """ @@ -411,7 +410,7 @@ class RedfishUtils(object): return msg, data def _init_session(self): - pass + self.module.deprecate("Method _init_session is deprecated and will be removed.", version="11.0.0", collection_name="community.general") def _get_vendor(self): # If we got the vendor info once, don't get it again From 82462e407e06e32d7b3a694e174a54d6852ac38f Mon Sep 17 00:00:00 2001 From: Ian Bishop <151477169+ianb-mp@users.noreply.github.com> Date: Tue, 3 Dec 2024 05:18:28 +1000 Subject: [PATCH 359/482] Add SR-IOV support to nmcli module (#9168) * Add SR-IOV support to nmcli module (#9168) * Add SR-IOV support to nmcli module (#9168) Fixes * Add SR-IOV support to nmcli module (#9168) Add test * Update changelogs/fragments/9168-nmcli-add-sriov-parameter.yml Co-authored-by: Felix Fontein * Update plugins/modules/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/nmcli.py Co-authored-by: Felix Fontein * Populate sriov options --------- Co-authored-by: Felix Fontein --- .../9168-nmcli-add-sriov-parameter.yml | 2 + plugins/modules/nmcli.py | 41 ++++++++++++ tests/unit/plugins/modules/test_nmcli.py | 64 +++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 changelogs/fragments/9168-nmcli-add-sriov-parameter.yml diff --git a/changelogs/fragments/9168-nmcli-add-sriov-parameter.yml 
b/changelogs/fragments/9168-nmcli-add-sriov-parameter.yml new file mode 100644 index 0000000000..77f28e73bf --- /dev/null +++ b/changelogs/fragments/9168-nmcli-add-sriov-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - add ``sriov`` parameter that enables support for SR-IOV settings (https://github.com/ansible-collections/community.general/pull/9168). diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index e2803432a9..4ea6799577 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -1058,6 +1058,38 @@ options: You can encode using this Ansible jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}"). - This is only used when O(vpn.ipsec-enabled=true). type: str + sriov: + description: + - Allow to configure SR-IOV settings. + - 'An up-to-date list of supported attributes can be found here: + U(https://networkmanager.pages.freedesktop.org/NetworkManager/NetworkManager/settings-sriov.html).' + type: dict + version_added: 10.1.0 + suboptions: + autoprobe-drivers: + description: + - Whether to autoprobe virtual functions by a compatible driver. + type: int + eswitch-encap-mode: + description: + - Select the eswitch encapsulation support. + type: int + eswitch-inline-mode: + description: + - Select the eswitch inline-mode of the device. + type: int + eswitch-mode: + description: + - Select the eswitch mode of the device. + type: int + total-vfs: + description: Number of virtual functions to create. Consult your NIC documentation for the maximum number of VFs supported. + type: int + vfs: + description: + - 'Virtual function descriptors in the form: V(INDEX [ATTR=VALUE[ ATTR=VALUE]...]).' + - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3 vlans=100). 
+ type: str ''' EXAMPLES = r''' @@ -1687,6 +1719,7 @@ class Nmcli(object): self.wireguard = module.params['wireguard'] self.vpn = module.params['vpn'] self.transport_mode = module.params['transport_mode'] + self.sriov = module.params['sriov'] if self.method4: self.ipv4_method = self.method4 @@ -1952,6 +1985,13 @@ class Nmcli(object): 'infiniband.transport-mode': self.transport_mode, }) + if self.type == 'ethernet': + if self.sriov: + for name, value in self.sriov.items(): + options.update({ + 'sriov.%s' % name: value, + }) + # Convert settings values based on the situation. for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -2607,6 +2647,7 @@ def main(): wireguard=dict(type='dict'), vpn=dict(type='dict'), transport_mode=dict(type='str', choices=['datagram', 'connected']), + sriov=dict(type='dict'), ), mutually_exclusive=[['never_default4', 'gw4'], ['routes4_extended', 'routes4'], diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py index 570b04d56f..89e8de6d64 100644 --- a/tests/unit/plugins/modules/test_nmcli.py +++ b/tests/unit/plugins/modules/test_nmcli.py @@ -357,6 +357,28 @@ ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no """ +TESTCASE_ETHERNET_ADD_SRIOV_VFS = [ + { + 'type': 'ethernet', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'ethernet_non_existant', + 'sriov': { + 'total-vfs': 16, + 'vfs': '0 spoof-check=true vlans=100', + }, + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_ETHERNET_ADD_SRIOV_VFS_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: ethernet_non_existant +connection.autoconnect: yes +sriov.total-vfs: 16 +sriov.vfs: 0 spoof-check=true vlans=100 +""" + TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC = [ { 'type': 'ethernet', @@ -1806,6 +1828,12 @@ def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_c )) +@pytest.fixture +def 
mocked_ethernet_connection_with_sriov_vfs_create(mocker): + mocker_set(mocker, + execute_return=(0, TESTCASE_ETHERNET_ADD_SRIOV_VFS_SHOW_OUTPUT, "")) + + @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create(mocker): mocker_set(mocker, @@ -3312,6 +3340,41 @@ def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_met assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_SRIOV_VFS, indirect=['patch_ansible_module']) +def test_ethernet_connection_sriov_vfs_create( + mocked_ethernet_connection_with_sriov_vfs_create, capfd): + """ + Test : Create ethernet connection with SR-IOV VFs + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'ethernet' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + + for param in ['connection.interface-name', 'ethernet_non_existant', + 'con-name', 'non_existent_nw_device', + 'sriov.total-vfs', '16', + 'sriov.vfs', '0 spoof-check=true vlans=100']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module']) def test_ethernet_connection_static_ipv6_address_static_route_with_metric_create( mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create, capfd): @@ -4384,6 +4447,7 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): 
wireguard=dict(type='dict'), vpn=dict(type='dict'), transport_mode=dict(type='str', choices=['datagram', 'connected']), + sriov=dict(type='dict'), ), mutually_exclusive=[['never_default4', 'gw4'], ['routes4_extended', 'routes4'], From 34010a788a1089f1d402f1ccfce2ec1b617ae98e Mon Sep 17 00:00:00 2001 From: ONODERA Masaru <46081939+masa-orca@users.noreply.github.com> Date: Tue, 3 Dec 2024 04:18:56 +0900 Subject: [PATCH 360/482] Add dynamicforward option (#9192) * Add dynamicforward option * Add fragment * Modify fragment --- .../ssh_config_add_dynamicforward_option.yml | 2 ++ plugins/modules/ssh_config.py | 7 +++++++ .../targets/ssh_config/tasks/options.yml | 20 ++++++++++++++++++- 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/ssh_config_add_dynamicforward_option.yml diff --git a/changelogs/fragments/ssh_config_add_dynamicforward_option.yml b/changelogs/fragments/ssh_config_add_dynamicforward_option.yml new file mode 100644 index 0000000000..0252c94c46 --- /dev/null +++ b/changelogs/fragments/ssh_config_add_dynamicforward_option.yml @@ -0,0 +1,2 @@ +minor_changes: + - ssh_config - add ``dynamicforward`` option (https://github.com/ansible-collections/community.general/pull/9192). \ No newline at end of file diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py index d974f45373..1f8098b25f 100644 --- a/plugins/modules/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -139,6 +139,11 @@ options: - Sets the C(ControlPersist) option. type: str version_added: 8.1.0 + dynamicforward: + description: + - Sets the C(DynamicForward) option. 
+ type: str + version_added: 10.1.0 requirements: - paramiko ''' @@ -272,6 +277,7 @@ class SSHConfig(object): controlmaster=self.params.get('controlmaster'), controlpath=self.params.get('controlpath'), controlpersist=fix_bool_str(self.params.get('controlpersist')), + dynamicforward=self.params.get('dynamicforward'), ) config_changed = False @@ -376,6 +382,7 @@ def main(): controlmaster=dict(type='str', default=None, choices=['yes', 'no', 'ask', 'auto', 'autoask']), controlpath=dict(type='str', default=None), controlpersist=dict(type='str', default=None), + dynamicforward=dict(type='str'), user=dict(default=None, type='str'), user_known_hosts_file=dict(type='str', default=None), ), diff --git a/tests/integration/targets/ssh_config/tasks/options.yml b/tests/integration/targets/ssh_config/tasks/options.yml index f88f99081f..203c782487 100644 --- a/tests/integration/targets/ssh_config/tasks/options.yml +++ b/tests/integration/targets/ssh_config/tasks/options.yml @@ -21,6 +21,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add check_mode: true @@ -55,6 +56,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add @@ -78,6 +80,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add_again @@ -105,6 +108,7 @@ - "'controlmaster auto' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 10080' in slurp_ssh_config['content'] | b64decode" - name: Options - Update host community.general.ssh_config: @@ -118,6 +122,7 @@ controlmaster: no controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" + dynamicforward: '11080' state: present 
register: options_update @@ -143,6 +148,7 @@ controlmaster: no controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" + dynamicforward: '11080' state: present register: options_update @@ -171,6 +177,7 @@ - "'controlmaster no' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" - name: Options - Ensure no update in case option exist in ssh_config file but wasn't defined in playbook community.general.ssh_config: @@ -204,6 +211,7 @@ - "'controlmaster no' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" - name: Debug debug: @@ -255,8 +263,9 @@ - "'controlmaster auto' not in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/sockets/%r@%h-%p' not in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' not in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 10080' not in slurp_ssh_config['content'] | b64decode" -# Proxycommand and ProxyJump are mutually exclusive. +# Proxycommand and ProxyJump are mutually exclusive. 
# Reset ssh_config before testing options with proxyjump - name: Copy sample config file @@ -276,6 +285,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add check_mode: true @@ -310,6 +320,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add @@ -333,6 +344,7 @@ controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" controlpersist: yes + dynamicforward: '10080' state: present register: options_add_again @@ -360,6 +372,7 @@ - "'controlmaster auto' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 10080' in slurp_ssh_config['content'] | b64decode" - name: Options - Update host community.general.ssh_config: @@ -373,6 +386,7 @@ controlmaster: no controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" + dynamicforward: '11080' state: present register: options_update @@ -398,6 +412,7 @@ controlmaster: no controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" + dynamicforward: '11080' state: present register: options_update @@ -426,6 +441,7 @@ - "'controlmaster no' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" - name: Options - Ensure no update in case option exist in ssh_config file but wasn't defined in playbook community.general.ssh_config: @@ -459,6 +475,7 @@ - "'controlmaster no' in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | 
b64decode" + - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" - name: Debug debug: @@ -510,3 +527,4 @@ - "'controlmaster auto' not in slurp_ssh_config['content'] | b64decode" - "'controlpath ~/.ssh/sockets/%r@%h-%p' not in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' not in slurp_ssh_config['content'] | b64decode" + - "'dynamicforward 10080' not in slurp_ssh_config['content'] | b64decode" From d826dd1c8826fd87ea642d0c8c855bcc2b10180e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 3 Dec 2024 08:19:36 +1300 Subject: [PATCH 361/482] opkg: deprecate value "" for force (#9172) * opkg: deprecate value "" for force * fix sanity plus wording * add comments for future removal * add changelog frag --- changelogs/fragments/9172-opkg-deprecate-force-none.yml | 2 ++ plugins/modules/opkg.py | 9 ++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/9172-opkg-deprecate-force-none.yml diff --git a/changelogs/fragments/9172-opkg-deprecate-force-none.yml b/changelogs/fragments/9172-opkg-deprecate-force-none.yml new file mode 100644 index 0000000000..1b11419c5a --- /dev/null +++ b/changelogs/fragments/9172-opkg-deprecate-force-none.yml @@ -0,0 +1,2 @@ +deprecated_features: + - opkg - deprecate value ``""`` for parameter ``force`` (https://github.com/ansible-collections/community.general/pull/9172). diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py index da51755efb..32cb2753f5 100644 --- a/plugins/modules/opkg.py +++ b/plugins/modules/opkg.py @@ -46,8 +46,7 @@ options: force: description: - The C(opkg --force) parameter used. - - Passing V("") as value and not passing any value at all have both - the same effect of B(not) using any C(--force-) parameter. + - State V("") is deprecated and will be removed in community.general 12.0.0. Please omit the parameter O(force) to obtain the same behavior. 
choices: - "" - "depends" @@ -152,7 +151,11 @@ class Opkg(StateModuleHelper): ) def _force(value): + # 12.0.0 function _force() to be removed entirely if value == "": + self.deprecate('Value "" is deprecated. Simply omit the parameter "force" to prevent any --force-X argument when running opkg', + version="12.0.0", + collection_name="community.general") value = None return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True) @@ -164,7 +167,7 @@ class Opkg(StateModuleHelper): arg_formats=dict( package=cmd_runner_fmt.as_list(), state=cmd_runner_fmt.as_map(state_map), - force=cmd_runner_fmt.as_func(_force), + force=cmd_runner_fmt.as_func(_force), # 12.0.0 replace with cmd_runner_fmt.as_optval("--force-") update_cache=cmd_runner_fmt.as_bool("update"), version=cmd_runner_fmt.as_fixed("--version"), ), From a789bd128f9c88022084d82378fa28f3bdf24748 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Mon, 2 Dec 2024 20:20:13 +0100 Subject: [PATCH 362/482] Add the accumulate filter (#9133) * Add the accumulate filter - Add myself as a maintainer for it. - Some integration tests. * accumulate: fix documentation and add test aliases The aliases file was copied over from tests/integrations/targets/filter_dict/aliases as the documentation[1] suggests to use the same group as existing similar tests. [1]: https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html Suggested-by: Felix Fontein * accumulate: documentation: markup consistency with other plugins Suggested-by: Felix Fontein Suggested-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * filter/accumulate: Validate input is a Sequence Accepting arbitrary iterables might lead to surprising behavior so we are stricter on what we accept in the filter. Relaxing those requirements is easier than retrofitting them, in terms of backwards compatibility. 
Suggested-by: Felix Fontein Signed-off-by: Max Gautier * filter/accumulate: Document the behavior with a string Signed-off-by: Max Gautier --------- Signed-off-by: Max Gautier --- .github/BOTMETA.yml | 2 + plugins/filter/accumulate.py | 62 +++++++++++++++++++ .../targets/filter_accumulate/aliases | 5 ++ .../targets/filter_accumulate/tasks/main.yml | 35 +++++++++++ 4 files changed, 104 insertions(+) create mode 100644 plugins/filter/accumulate.py create mode 100644 tests/integration/targets/filter_accumulate/aliases create mode 100644 tests/integration/targets/filter_accumulate/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 9650fd0ef3..ec9b9b7ddc 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -135,6 +135,8 @@ files: $doc_fragments/xenserver.py: labels: xenserver maintainers: bvitnik + $filters/accumulate.py: + maintainers: VannTen $filters/counter.py: maintainers: keilr $filters/crc32.py: diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py new file mode 100644 index 0000000000..9400936e1d --- /dev/null +++ b/plugins/filter/accumulate.py @@ -0,0 +1,62 @@ +# Copyright (c) Max Gautier +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = ''' + name: accumulate + short_description: Produce a list of accumulated sums of the input list contents + version_added: 10.1.0 + author: Max Gautier (@VannTen) + description: + - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). + - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. + - Addition means the default Python implementation of C(+) for input list elements type. + options: + _input: + description: A list. 
+ type: list + elements: any + required: true +''' + +RETURN = ''' + _value: + description: A list of cumulated sums of the elements of the input list. + type: list + elements: any +''' + +EXAMPLES = ''' +- name: Enumerate parent directories of some path + ansible.builtin.debug: + var: > + "/some/path/to/my/file" + | split('/') | map('split', '/') + | community.general.accumulate | map('join', '/') + # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file'] +- name: Growing string + ansible.builtin.debug: + var: "'abc' | community.general.accumulate" + # Produces ['a', 'ab', 'abc'] +''' + +from itertools import accumulate +from collections.abc import Sequence + +from ansible.errors import AnsibleFilterError + + +def list_accumulate(sequence): + if not isinstance(sequence, Sequence): + raise AnsibleFilterError('Invalid value type (%s) for accumulate (%r)' % + (type(sequence), sequence)) + + return accumulate(sequence) + + +class FilterModule(object): + + def filters(self): + return { + 'accumulate': list_accumulate, + } diff --git a/tests/integration/targets/filter_accumulate/aliases b/tests/integration/targets/filter_accumulate/aliases new file mode 100644 index 0000000000..343f119da8 --- /dev/null +++ b/tests/integration/targets/filter_accumulate/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 diff --git a/tests/integration/targets/filter_accumulate/tasks/main.yml b/tests/integration/targets/filter_accumulate/tasks/main.yml new file mode 100644 index 0000000000..8fe854228a --- /dev/null +++ b/tests/integration/targets/filter_accumulate/tasks/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright (c), Max Gautier +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +- name: Filter | Accumulate | Test valid values + assert: + that: + - "'abc' | community.general.accumulate == ['a', 'ab', 'abc']" + - "['a', 'b'] | community.general.accumulate == ['a', 'ab']" + - "[1, 2, 3] | community.general.accumulate == [1, 3, 6]" + - "[['foo'],['bar'],['foobar']] | community.general.accumulate == [['foo'], ['foo', 'bar'], ['foo', 'bar', 'foobar']]" + - "'path/to/file' | split('/') | map('split', '/') | community.general.accumulate | map('join', '/') == ['path', 'path/to', 'path/to/file']" + - "[{'foo':1}, {'bar':2}] | map('dict2items') | community.general.accumulate | map('items2dict') == [{'foo':1}, {'foo':1, 'bar':2}]" + + +- name: Filter | Accumulate | Test invalid values | Integer + debug: + var: "1 | community.general.accumulate" + register: integer_result + ignore_errors: true + +- name: Filter | Accumulate | Test invalid values | Non uniform list + debug: + var: "['aa', 1] | community.general.accumulate" + register: non_uniform_list_result + ignore_errors: true + +- name: Filter | Accumulate | Test invalid values | Check errors + assert: + that: + - integer_result is failed + - integer_result.msg is match('Invalid value type.*') + - non_uniform_list_result is failed + - non_uniform_list_result.msg is match('Unexpected templating type error.*can only concatenate str.*') From 55d714da81fb6caa018cada300315b6086042c8e Mon Sep 17 00:00:00 2001 From: fgruenbauer Date: Mon, 2 Dec 2024 20:21:26 +0100 Subject: [PATCH 363/482] keycloak_clientscope_type: sort default and optional clientscope lists before diff (#9202) * sort default and optional clientscope lists before diff * add changelog fragment --- .../9202-keycloak_clientscope_type-sort-lists.yml | 2 ++ plugins/modules/keycloak_clientscope_type.py | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9202-keycloak_clientscope_type-sort-lists.yml diff --git 
a/changelogs/fragments/9202-keycloak_clientscope_type-sort-lists.yml b/changelogs/fragments/9202-keycloak_clientscope_type-sort-lists.yml new file mode 100644 index 0000000000..ef9fc7a6f7 --- /dev/null +++ b/changelogs/fragments/9202-keycloak_clientscope_type-sort-lists.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientscope_type - sort the default and optional clientscope lists to improve the diff (https://github.com/ansible-collections/community.general/pull/9202). \ No newline at end of file diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py index 0c919afdad..1fb9a0813c 100644 --- a/plugins/modules/keycloak_clientscope_type.py +++ b/plugins/modules/keycloak_clientscope_type.py @@ -190,6 +190,15 @@ def extract_field(dictionary, field='name'): return [cs[field] for cs in dictionary] +def normalize_scopes(scopes): + scopes_copy = scopes.copy() + if isinstance(scopes_copy.get('default_clientscopes'), list): + scopes_copy['default_clientscopes'] = sorted(scopes_copy['default_clientscopes']) + if isinstance(scopes_copy.get('optional_clientscopes'), list): + scopes_copy['optional_clientscopes'] = sorted(scopes_copy['optional_clientscopes']) + return scopes_copy + + def main(): """ Module keycloak_clientscope_type @@ -244,7 +253,7 @@ def main(): }) if module._diff: - result['diff'] = dict(before=result['existing'], after=result['proposed']) + result['diff'] = dict(before=normalize_scopes(result['existing']), after=normalize_scopes(result['proposed'])) default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) From e5761bd7c745c3b03d083d1d685badc0ea4ba577 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 2 Dec 2024 21:13:56 +0100 Subject: [PATCH 364/482] The next feature release will be 10.2.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 4daf0e0ac0..4ff0768938 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 10.1.0 +version: 10.2.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 6f87bf2bade07f5ef47f15700b7d0ae89c78145a Mon Sep 17 00:00:00 2001 From: IamLunchbox <56757745+IamLunchbox@users.noreply.github.com> Date: Mon, 9 Dec 2024 19:25:13 +0100 Subject: [PATCH 365/482] Fix incorrect key lookup (#9223) * Fix incorrect key lookup * Create changelog fragment --- changelogs/fragments/9223-proxmox-backup-bugfixes.yml | 2 ++ plugins/modules/proxmox_backup.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9223-proxmox-backup-bugfixes.yml diff --git a/changelogs/fragments/9223-proxmox-backup-bugfixes.yml b/changelogs/fragments/9223-proxmox-backup-bugfixes.yml new file mode 100644 index 0000000000..559e1f45bc --- /dev/null +++ b/changelogs/fragments/9223-proxmox-backup-bugfixes.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox_backup - fix incorrect key lookup in vmid permission check (https://github.com/ansible-collections/community.general/pull/9223). 
diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py index fb5750383e..575d492bf9 100644 --- a/plugins/modules/proxmox_backup.py +++ b/plugins/modules/proxmox_backup.py @@ -325,7 +325,7 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): if "/" in permissions.keys() and permissions["/"].get( "VM.Backup", 0) == 1: sufficient_permissions = True - elif "/vms" in permissions.keys() and permissions["/"].get( + elif "/vms" in permissions.keys() and permissions["/vms"].get( "VM.Backup", 0) == 1: sufficient_permissions = True elif pool and "/pool/" + pool in permissions.keys() and permissions["/pool/" + pool].get( From d7ad7c2dcad5bde031ffd969059ebd0e77c90b2c Mon Sep 17 00:00:00 2001 From: snailed Date: Mon, 9 Dec 2024 19:26:53 +0100 Subject: [PATCH 366/482] xbps: support --rootdir and --repository (#9174) * xbps: support --rootdir and --repository * please the robot * rename repository arg to repositories * skip repo flag when querying package state * add accept_pubkey param, detect pubkey import fail * add example for manually copying signing keys * bugfix package removal * fix typos * change root param type to path * fix "root" type, bump version_added * lintfix --- ...74-xbps-support-rootdir-and-repository.yml | 2 + plugins/modules/xbps.py | 74 ++++++++++++++++++- 2 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9174-xbps-support-rootdir-and-repository.yml diff --git a/changelogs/fragments/9174-xbps-support-rootdir-and-repository.yml b/changelogs/fragments/9174-xbps-support-rootdir-and-repository.yml new file mode 100644 index 0000000000..9197607684 --- /dev/null +++ b/changelogs/fragments/9174-xbps-support-rootdir-and-repository.yml @@ -0,0 +1,2 @@ +minor_changes: + - xbps - add ``root`` and ``repository`` options to enable bootstrapping new void installations (https://github.com/ansible-collections/community.general/pull/9174). 
diff --git a/plugins/modules/xbps.py b/plugins/modules/xbps.py index bcbbb3f021..cd34029eba 100644 --- a/plugins/modules/xbps.py +++ b/plugins/modules/xbps.py @@ -67,6 +67,26 @@ options: type: bool default: true version_added: '0.2.0' + root: + description: + - The full path for the target root directory. + type: path + version_added: '10.2.0' + repositories: + description: + - Repository URL(s) to prepend to the repository list for the + package installation. + The URL can be a URL to a repository for + remote repositories or a path for local repositories. + type: list + elements: str + version_added: '10.2.0' + accept_pubkey: + description: + - Whether or not repository signing keys should be automatically accepted. + type: bool + default: false + version_added: '10.2.0' ''' EXAMPLES = ''' @@ -107,6 +127,30 @@ EXAMPLES = ''' name: foo state: present upgrade_xbps: false + +- name: Find repository keys to install into a new void system on a mounted partition + ansible.builtin.find: + path: /var/db/xbps/keys + pattern: "*.plist" + register: xbps_keys + +- name: Copy repository keys to into a new void system on a mounted partition + ansible.builtin.copy: + remote_src: true + src: "{{ item }}" + dest: "/mnt/{{ item }}" + owner: root + group: root + mode: "0644" + when: xbps_keys.matched > 0 + loop: "{{ xbps_keys.files | map(attribute='path') }}" + +- name: Install a new void system on a mounted partition + community.general.xbps: + name: base-system + state: present + repositories: https://repo-default.voidlinux.org/current + root: /mnt ''' RETURN = ''' @@ -133,16 +177,29 @@ def is_installed(xbps_output): return bool(len(xbps_output)) +def append_flags(module, xbps_path, cmd, skip_repo=False): + """Appends the repository/root flags when needed""" + if module.params["root"]: + cmd = "%s -r %s" % (cmd, module.params["root"]) + if module.params["repositories"] and not cmd.startswith(xbps_path["remove"]) and not skip_repo: + for repo in module.params["repositories"]: + 
cmd = "%s --repository=%s" % (cmd, repo) + + return cmd + + def query_package(module, xbps_path, name, state="present"): """Returns Package info""" if state == "present": lcmd = "%s %s" % (xbps_path['query'], name) + lcmd = append_flags(module, xbps_path, lcmd, skip_repo=True) lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) if not is_installed(lstdout): # package is not installed locally return False, False rcmd = "%s -Sun" % (xbps_path['install']) + rcmd = append_flags(module, xbps_path, rcmd) rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) if rrc == 0 or rrc == 17: """Return True to indicate that the package is installed locally, @@ -156,8 +213,15 @@ def query_package(module, xbps_path, name, state="present"): def update_package_db(module, xbps_path): """Returns True if update_package_db changed""" cmd = "%s -S" % (xbps_path['install']) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) + cmd = append_flags(module, xbps_path, cmd) + if module.params['accept_pubkey']: + stdin = "y\n" + else: + stdin = "n\n" + rc, stdout, stderr = module.run_command(cmd, check_rc=False, data=stdin) + if "Failed to import pubkey" in stderr: + module.fail_json(msg="Failed to import pubkey for repository") if rc != 0: module.fail_json(msg="Could not update package db") if "avg rate" in stdout: @@ -168,6 +232,7 @@ def update_package_db(module, xbps_path): def upgrade_xbps(module, xbps_path, exit_on_success=False): cmdupgradexbps = "%s -uy xbps" % (xbps_path['install']) + cmdupgradexbps = append_flags(module, xbps_path, cmdupgradexbps) rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False) if rc != 0: module.fail_json(msg='Could not upgrade xbps itself') @@ -177,6 +242,8 @@ def upgrade(module, xbps_path): """Returns true is full upgrade succeeds""" cmdupgrade = "%s -uy" % (xbps_path['install']) cmdneedupgrade = "%s -un" % (xbps_path['install']) + cmdupgrade = append_flags(module, xbps_path, cmdupgrade) + cmdneedupgrade = 
append_flags(module, xbps_path, cmdneedupgrade) rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False) if rc == 0: @@ -210,6 +277,7 @@ def remove_packages(module, xbps_path, packages): continue cmd = "%s -y %s" % (xbps_path['remove'], package) + cmd = append_flags(module, xbps_path, cmd, skip_repo=True) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -242,6 +310,7 @@ def install_packages(module, xbps_path, state, packages): module.exit_json(changed=False, msg="Nothing to Install") cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall)) + cmd = append_flags(module, xbps_path, cmd) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 16 and module.params['upgrade_xbps']: @@ -308,6 +377,9 @@ def main(): upgrade=dict(default=False, type='bool'), update_cache=dict(default=True, type='bool'), upgrade_xbps=dict(default=True, type='bool'), + root=dict(type='path'), + repositories=dict(type='list', elements='str'), + accept_pubkey=dict(default=False, type='bool') ), required_one_of=[['name', 'update_cache', 'upgrade']], supports_check_mode=True) From 9df4ef9a9c4120eeebf9ed7a25e2e1154aa3f3b1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 13 Dec 2024 22:09:58 +0100 Subject: [PATCH 367/482] sysrc: add another exclusion for ezjail (#9243) Add another exclusion. 
--- tests/integration/targets/sysrc/tasks/main.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml index ace38202f2..25d7ca4d59 100644 --- a/tests/integration/targets/sysrc/tasks/main.yml +++ b/tests/integration/targets/sysrc/tasks/main.yml @@ -141,10 +141,12 @@ # # NOTE: currently fails with FreeBSD 12 with minor version less than 4 # NOTE: currently fails with FreeBSD 13 with minor version less than 2 + # NOTE: currently fails with FreeBSD 14 with minor version less than 1 # when: >- ansible_distribution_version is version('12.4', '>=') and ansible_distribution_version is version('13', '<') - or ansible_distribution_version is version('13.2', '>=') + or ansible_distribution_version is version('13.2', '>=') and ansible_distribution_version is version('14', '<') + or ansible_distribution_version is version('14.1', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml From 88ea025d1293312a4c74cfd3f795785f7e46653a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 14 Dec 2024 12:06:41 +1300 Subject: [PATCH 368/482] test helper improvements (#9242) --- tests/unit/plugins/modules/helper.py | 65 ++++++++++++------- tests/unit/plugins/modules/test_snap.py | 2 +- .../unit/plugins/modules/test_xfconf_info.py | 4 +- .../plugins/modules/test_xfconf_info.yaml | 5 +- 4 files changed, 50 insertions(+), 26 deletions(-) diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/helper.py index 0626e39f1c..8071bc2aa9 100644 --- a/tests/unit/plugins/modules/helper.py +++ b/tests/unit/plugins/modules/helper.py @@ -13,20 +13,25 @@ import yaml import pytest +from ansible.module_utils.common._collections_compat import Sequence + + class Helper(object): + TEST_SPEC_VALID_SECTIONS = ["anchors", "test_cases"] + @staticmethod - def from_list(test_module, ansible_module, test_cases): - helper 
= Helper(test_module, ansible_module, test_cases=test_cases) + def from_spec(test_module, ansible_module, test_spec, mocks=None): + helper = Helper(test_module, ansible_module, test_spec=test_spec, mocks=mocks) return helper @staticmethod - def from_file(test_module, ansible_module, filename): + def from_file(test_module, ansible_module, filename, mocks=None): with open(filename, "r") as test_cases: - test_cases_data = yaml.safe_load(test_cases) - return Helper.from_list(test_module, ansible_module, test_cases_data) + test_spec = yaml.safe_load(test_cases) + return Helper.from_spec(test_module, ansible_module, test_spec, mocks) @staticmethod - def from_module(ansible_module, test_module_name, test_spec=None): + def from_module(ansible_module, test_module_name, test_spec=None, mocks=None): test_module = sys.modules[test_module_name] if test_spec is None: test_spec = test_module.__file__.replace('.py', '.yaml') @@ -35,14 +40,22 @@ class Helper(object): def add_func_to_test_module(self, name, func): setattr(self.test_module, name, func) - def __init__(self, test_module, ansible_module, test_cases): + def __init__(self, test_module, ansible_module, test_spec, mocks=None): self.test_module = test_module self.ansible_module = ansible_module self.test_cases = [] self.fixtures = {} + if isinstance(test_spec, Sequence): + test_cases = test_spec + else: # it is a dict + test_cases = test_spec['test_cases'] + spec_diff = set(test_spec.keys()) - set(self.TEST_SPEC_VALID_SECTIONS) + if spec_diff: + raise ValueError("Test specification contain unknown keys: {0}".format(", ".join(spec_diff))) + self.mocks_map = {m.name: m for m in mocks} if mocks else {} for test_case in test_cases: - tc = ModuleTestCase.make_test_case(test_case, test_module) + tc = ModuleTestCase.make_test_case(test_case, test_module, self.mocks_map) self.test_cases.append(tc) self.fixtures.update(tc.fixtures) self.set_test_func() @@ -99,7 +112,7 @@ class ModuleTestCase: self.id = id self.input = input 
self.output = output - self._mocks = mocks + self.mock_specs = mocks self.mocks = {} self.flags = flags @@ -124,23 +137,23 @@ class ModuleTestCase: ) @staticmethod - def make_test_case(test_case, test_module): + def make_test_case(test_case_spec, test_module, mocks_map): tc = ModuleTestCase( - id=test_case["id"], - input=test_case.get("input", {}), - output=test_case.get("output", {}), - mocks=test_case.get("mocks", {}), - flags=test_case.get("flags", {}) + id=test_case_spec["id"], + input=test_case_spec.get("input", {}), + output=test_case_spec.get("output", {}), + mocks=test_case_spec.get("mocks", {}), + flags=test_case_spec.get("flags", {}) ) - tc.build_mocks(test_module) + tc.build_mocks(test_module, mocks_map) return tc - def build_mocks(self, test_module): - for mock, mock_spec in self._mocks.items(): - mock_class = self.get_mock_class(test_module, mock) - self.mocks[mock] = mock_class.build_mock(mock_spec) + def build_mocks(self, test_module, mocks_map): + for mock_name, mock_spec in self.mock_specs.items(): + mock_class = mocks_map.get(mock_name, self.get_mock_class(test_module, mock_name)) + self.mocks[mock_name] = mock_class.build_mock(mock_spec) - self._fixtures.update(self.mocks[mock].fixtures()) + self._fixtures.update(self.mocks[mock_name].fixtures()) @staticmethod def get_mock_class(test_module, mock): @@ -187,6 +200,10 @@ class ModuleTestCase: class TestCaseMock: + @property + def name(self): + raise NotImplementedError() + @classmethod def build_mock(cls, mock_specs): return cls(mock_specs) @@ -205,6 +222,10 @@ class TestCaseMock: class RunCommandMock(TestCaseMock): + @property + def name(self): + return "run_command" + def __str__(self): return "".format(specs=self.mock_specs) @@ -214,7 +235,7 @@ class RunCommandMock(TestCaseMock): def fixtures(self): @pytest.fixture def patch_bin(mocker): - def mockie(self, path, *args, **kwargs): + def mockie(self_, path, *args, **kwargs): return "/testbin/{0}".format(path) 
mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', mockie) diff --git a/tests/unit/plugins/modules/test_snap.py b/tests/unit/plugins/modules/test_snap.py index d70094551a..de7f35353a 100644 --- a/tests/unit/plugins/modules/test_snap.py +++ b/tests/unit/plugins/modules/test_snap.py @@ -475,4 +475,4 @@ TEST_CASES = [ ), ] -Helper.from_list(sys.modules[__name__], snap, TEST_CASES) +Helper.from_spec(sys.modules[__name__], snap, TEST_CASES) diff --git a/tests/unit/plugins/modules/test_xfconf_info.py b/tests/unit/plugins/modules/test_xfconf_info.py index 308f075490..4cdb92b305 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.py +++ b/tests/unit/plugins/modules/test_xfconf_info.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf_info -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .helper import Helper, RunCommandMock -Helper.from_module(xfconf_info, __name__) +Helper.from_module(xfconf_info, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_xfconf_info.yaml b/tests/unit/plugins/modules/test_xfconf_info.yaml index 26f77ce474..535e50602f 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.yaml +++ b/tests/unit/plugins/modules/test_xfconf_info.yaml @@ -4,6 +4,9 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: - id: test_simple_property_get input: channel: xfwm4 @@ -14,7 +17,7 @@ mocks: run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + environ: *env-def rc: 0 out: "100\n" err: "" From 34e8e8e5d18ab339556c922c28967d77faa0084c Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Sat, 14 Dec 2024 02:44:33 -0700 Subject: [PATCH 369/482] Fix VerifyBiosAttributes command on Systems with 
multiple entries (#9234) * Fix verify_bios_attributes command * Add changelog fragment * Update changelogs/fragments/9234-fix-verify-bios-attributes-multi-system.yml Improve fragment Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .../9234-fix-verify-bios-attributes-multi-system.yml | 2 ++ plugins/module_utils/redfish_utils.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/9234-fix-verify-bios-attributes-multi-system.yml diff --git a/changelogs/fragments/9234-fix-verify-bios-attributes-multi-system.yml b/changelogs/fragments/9234-fix-verify-bios-attributes-multi-system.yml new file mode 100644 index 0000000000..95bafed8d8 --- /dev/null +++ b/changelogs/fragments/9234-fix-verify-bios-attributes-multi-system.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi system resource nodes (https://github.com/ansible-collections/community.general/pull/9234). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index dd559921ae..253395ea93 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -3616,7 +3616,7 @@ class RedfishUtils(object): def verify_bios_attributes(self, bios_attributes): # This method verifies BIOS attributes against the provided input - server_bios = self.get_multi_bios_attributes() + server_bios = self.get_bios_attributes(self.systems_uri) if server_bios["ret"] is False: return server_bios @@ -3625,8 +3625,8 @@ class RedfishUtils(object): # Verify bios_attributes with BIOS settings available in the server for key, value in bios_attributes.items(): - if key in server_bios["entries"][0][1]: - if server_bios["entries"][0][1][key] != value: + if key in server_bios["entries"]: + if server_bios["entries"][key] != value: bios_dict.update({key: value}) else: wrong_param.update({key: value}) From f6dae1fc4d2edbb44871045e573c781d0911fa06 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 14 Dec 2024 14:56:05 +0100 Subject: [PATCH 370/482] CI: Fix some issues pointed out by zizmor (#9250) Fix some issues pointed out by zizmor. --- .github/workflows/codeql-analysis.yml | 2 ++ .github/workflows/reuse.yml | 1 + 2 files changed, 3 insertions(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c93162a72a..e8572fafb6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -25,6 +25,8 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + persist-credentials: false # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml index 7c6f76bd5b..3c5e986e57 100644 --- a/.github/workflows/reuse.yml +++ b/.github/workflows/reuse.yml @@ -28,6 +28,7 @@ jobs: steps: - uses: actions/checkout@v4 with: + persist-credentials: false ref: ${{ github.event.pull_request.head.sha || '' }} - name: REUSE Compliance Check From 65827bdc965006a3448d6f2702320dd07914155e Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Mon, 16 Dec 2024 20:51:38 +0100 Subject: [PATCH 371/482] Drop myself from team_suse (#9259) I no longer work with ansible and/or SUSE so drop me from the team. --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ec9b9b7ddc..6896106906 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1565,6 +1565,6 @@ macros: team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor + team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor team_virt: joshainglis karmab Thulium-Drake Ajpantuso team_wdc: mikemoerk From 50b25f8c01f86e6467cd7228881163bdfeff2dbb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 17 Dec 2024 21:20:19 +0100 Subject: [PATCH 372/482] random_words integration tests: avoid test failure due to valid result (#9271) Avoid test failure due to valid result. 
--- tests/integration/targets/lookup_random_words/test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/lookup_random_words/test.yml b/tests/integration/targets/lookup_random_words/test.yml index 90c6727304..e1b6fde13b 100644 --- a/tests/integration/targets/lookup_random_words/test.yml +++ b/tests/integration/targets/lookup_random_words/test.yml @@ -27,6 +27,7 @@ - result4[0] | length >= 17 - result4[0] | length <= 29 - result4[0] | regex_findall("[A-Z]") | length == 3 - - result4[0].count("-") == 2 + # If one of the random words is 't-shirt', there are more than 2 dashes... + - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower() - result5 | length == 1 - result5[0] | length == 15 From dcdec6ee4e35891c7092e8222e5fccbedab3f481 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 19 Dec 2024 20:42:45 +1300 Subject: [PATCH 373/482] mh guide: update exception handling (#9282) --- docs/docsite/rst/guide_modulehelper.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst index 68b46e6c94..e3c7a124cf 100644 --- a/docs/docsite/rst/guide_modulehelper.rst +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -346,6 +346,8 @@ However, you can set output variables specifically for that exception, if you so .. 
code-block:: python + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException + def __init_module__(self): if not complex_validation(): self.do_raise("Validation failed!") @@ -354,11 +356,16 @@ However, you can set output variables specifically for that exception, if you so awesomeness = calculate_awesomeness() if awesomeness > 1000: self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness}) + # which is just a convenience shortcut for + raise ModuleHelperException("...", update_output={...}) All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call. However, if you do want to call ``self.module.fail_json()`` yourself it will work, just keep in mind that there will be no automatic handling of output variables in that case. +Behind the curtains, all ``do_raise()`` does is to raise a ``ModuleHelperException``. +If you want to create specialized error handling for your code, the best way is to extend that clas and raise it when needed. + .. 
_ansible_collections.community.general.docsite.guide_modulehelper.statemh: StateModuleHelper From d629a50b17f7087c9be85bc0eaa6867dbeb4697f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 19 Dec 2024 20:43:44 +1300 Subject: [PATCH 374/482] yarn: adjust docs (#9279) --- plugins/modules/yarn.py | 134 ++++++++++++++++------------------------ 1 file changed, 53 insertions(+), 81 deletions(-) diff --git a/plugins/modules/yarn.py b/plugins/modules/yarn.py index c278951d5e..a2a3c471b7 100644 --- a/plugins/modules/yarn.py +++ b/plugins/modules/yarn.py @@ -11,12 +11,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: yarn -short_description: Manage node.js packages with Yarn +short_description: Manage Node.js packages with Yarn description: - - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/) + - Manage Node.js packages with the Yarn package manager U(https://yarnpkg.com/). author: - "David Gunter (@verkaufer)" - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)" @@ -31,25 +30,25 @@ options: name: type: str description: - - The name of a node.js library to install + - The name of a Node.js library to install. - If omitted all packages in package.json are installed. - - To globally install from local node.js library. Prepend "file:" to the path of the node.js library. + - To globally install from local Node.js library. Prepend C(file:) to the path of the Node.js library. required: false path: type: path description: - The base path where Node.js libraries will be installed. - - This is where the node_modules folder lives. + - This is where the C(node_modules) folder lives. required: false version: type: str description: - The version of the library to be installed. - - Must be in semver format. If "latest" is desired, use "state" arg instead + - Must be in semver format. 
If "latest" is desired, use O(state) arg instead. required: false global: description: - - Install the node.js library globally + - Install the Node.js library globally. required: false default: false type: bool @@ -60,14 +59,14 @@ options: required: false ignore_scripts: description: - - Use the --ignore-scripts flag when installing. + - Use the C(--ignore-scripts) flag when installing. required: false type: bool default: false production: description: - Install dependencies in production mode. - - Yarn will ignore any dependencies under devDependencies in package.json + - Yarn will ignore any dependencies under devDependencies in C(package.json). required: false type: bool default: false @@ -79,88 +78,61 @@ options: state: type: str description: - - Installation state of the named node.js library - - If absent is selected, a name option must be provided + - Installation state of the named Node.js library. + - If V(absent) is selected, a O(name) option must be provided. required: false default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] requirements: - - Yarn installed in bin path (typically /usr/local/bin) -''' + - Yarn installed in bin path (typically C(/usr/local/bin)) +""" -EXAMPLES = ''' -- name: Install "imagemin" node.js package. - community.general.yarn: - name: imagemin - path: /app/location +EXAMPLES = r""" + - name: Install "imagemin" Node.js package. + community.general.yarn: + name: imagemin + path: /app/location -- name: Install "imagemin" node.js package on version 5.3.1 - community.general.yarn: - name: imagemin - version: '5.3.1' - path: /app/location + - name: Install "imagemin" Node.js package on version 5.3.1 + community.general.yarn: + name: imagemin + version: '5.3.1' + path: /app/location -- name: Install "imagemin" node.js package globally. - community.general.yarn: - name: imagemin - global: true + - name: Install "imagemin" Node.js package globally. 
+ community.general.yarn: + name: imagemin + global: true -- name: Remove the globally-installed package "imagemin". - community.general.yarn: - name: imagemin - global: true - state: absent + - name: Remove the globally-installed package "imagemin". + community.general.yarn: + name: imagemin + global: true + state: absent -- name: Install "imagemin" node.js package from custom registry. - community.general.yarn: - name: imagemin - registry: 'http://registry.mysite.com' + - name: Install "imagemin" Node.js package from custom registry. + community.general.yarn: + name: imagemin + registry: 'http://registry.mysite.com' -- name: Install packages based on package.json. - community.general.yarn: - path: /app/location + - name: Install packages based on package.json. + community.general.yarn: + path: /app/location -- name: Update all packages in package.json to their latest version. - community.general.yarn: - path: /app/location - state: latest -''' + - name: Update all packages in package.json to their latest version. + community.general.yarn: + path: /app/location + state: latest +""" -RETURN = ''' -changed: - description: Whether Yarn changed any package data - returned: always - type: bool - sample: true -msg: - description: Provides an error message if Yarn syntax was incorrect - returned: failure - type: str - sample: "Package must be explicitly named when uninstalling." -invocation: - description: Parameters and values used during execution - returned: success - type: dict - sample: { - "module_args": { - "executable": null, - "globally": false, - "ignore_scripts": false, - "name": null, - "path": "/some/path/folder", - "production": false, - "registry": null, - "state": "present", - "version": null - } - } +RETURN = r""" out: - description: Output generated from Yarn. 
- returned: always - type: str - sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] - Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s." -''' + description: Output generated from Yarn. + returned: always + type: str + sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] Building fresh packages...success + Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s." +""" import os import json From c5855d1a58d91994105d7ea07e23fa8554c7fa38 Mon Sep 17 00:00:00 2001 From: xilmen Date: Fri, 20 Dec 2024 22:52:50 +0100 Subject: [PATCH 375/482] =?UTF-8?q?Clean=20up=20Proxmox=20API=20token=20ha?= =?UTF-8?q?ndling=20by=20stripping=20whitespace=20and=20forma=E2=80=A6=20(?= =?UTF-8?q?#9228)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Clean up Proxmox API token handling by stripping whitespace and formatting the token string * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Update changelogs/fragments/9228-fix-issue-header.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/9228-fix-issue-header.yml Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- changelogs/fragments/9228-fix-issue-header.yml | 2 ++ plugins/inventory/proxmox.py | 17 +++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/9228-fix-issue-header.yml diff --git a/changelogs/fragments/9228-fix-issue-header.yml b/changelogs/fragments/9228-fix-issue-header.yml new file mode 100644 index 0000000000..450a23f8e5 --- /dev/null +++ b/changelogs/fragments/9228-fix-issue-header.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - strip whitespace from ``user``, ``token_id``, and 
``token_secret`` (https://github.com/ansible-collections/community.general/issues/9227, https://github.com/ansible-collections/community.general/pull/9228/). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 3ce4f789a3..38877b895c 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -275,7 +275,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return self.session def _get_auth(self): - validate_certs = self.get_option('validate_certs') if validate_certs is False: @@ -283,24 +282,26 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): disable_warnings() if self.proxmox_password: - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) - a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) - json = ret.json() - self.headers = { # only required for POST/PUT/DELETE methods, which we are not using currently # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket']) } - else: + # Clean and format token components + user = self.proxmox_user.strip() + token_id = self.proxmox_token_id.strip() + token_secret = self.proxmox_token_secret.strip() - self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)} + # Build token string without newlines + token = f'{user}!{token_id}={token_secret}' + + # Set headers with clean token + self.headers = {'Authorization': f'PVEAPIToken={token}'} def _get_json(self, url, ignore_errors=None): From 9452a2c8ac06cd12446d5fe500d6c24506f62075 Mon Sep 17 00:00:00 2001 From: Thibaut Decombe <68703331+UnknownPlatypus@users.noreply.github.com> Date: Fri, 20 Dec 2024 22:53:41 +0100 Subject: [PATCH 376/482] homebrew: fix incorrect handling of aliases (#9255) * Add failing test (See commit description) Second assert returns 
this: changed: [localhost] => changed=true changed_pkgs: - sqlite3 msg: 'Changed: 1, Unchanged: 1' unchanged_pkgs: - sqlite * Extract proper package_name from brew info using alisases * Add changelog fragment * Fix pep8 * Make sure sqlite is uninstalled beforehand * Use `package_result is (not) changed` syntax in assertions * Register more explicit names * Fix handling of casks --- ...-handling-of-aliased-homebrew-packages.yml | 2 + plugins/modules/homebrew.py | 27 +++++-- .../targets/homebrew/tasks/casks.yml | 8 +- .../targets/homebrew/tasks/formulae.yml | 74 +++++++++++++++---- 4 files changed, 83 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/9255-fix-handling-of-aliased-homebrew-packages.yml diff --git a/changelogs/fragments/9255-fix-handling-of-aliased-homebrew-packages.yml b/changelogs/fragments/9255-fix-handling-of-aliased-homebrew-packages.yml new file mode 100644 index 0000000000..350e81af8e --- /dev/null +++ b/changelogs/fragments/9255-fix-handling-of-aliased-homebrew-packages.yml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew - fix incorrect handling of aliased homebrew modules when the alias is requested (https://github.com/ansible-collections/community.general/pull/9255, https://github.com/ansible-collections/community.general/issues/9240). 
\ No newline at end of file diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 8eb1b9d689..980b5cf656 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -379,6 +379,20 @@ class Homebrew(object): ) raise HomebrewException(self.message) + def _save_package_info(self, package_detail, package_name): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_name) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_name) + + def _extract_package_name(self, package_detail, is_cask): + canonical_name = package_detail["token"] if is_cask else package_detail["name"] # For ex: 'sqlite' + all_valid_names = set(package_detail.get("aliases", [])) # For ex: {'sqlite3'} + all_valid_names.add(canonical_name) + + # Then make sure the user provided name resurface. + return (all_valid_names & set(self.packages)).pop() + def _get_packages_info(self): cmd = [ "{brew_path}".format(brew_path=self.brew_path), @@ -397,16 +411,13 @@ class Homebrew(object): data = json.loads(out) for package_detail in data.get("formulae", []): - if bool(package_detail.get("installed")): - self.installed_packages.add(package_detail["name"]) - if bool(package_detail.get("outdated")): - self.outdated_packages.add(package_detail["name"]) + package_name = self._extract_package_name(package_detail, is_cask=False) + self._save_package_info(package_detail, package_name) for package_detail in data.get("casks", []): - if bool(package_detail.get("installed")): - self.installed_packages.add(package_detail["token"]) - if bool(package_detail.get("outdated")): - self.outdated_packages.add(package_detail["token"]) + package_name = self._extract_package_name(package_detail, is_cask=True) + self._save_package_info(package_detail, package_name) + # /prep -------------------------------------------------------- }}} def run(self): diff --git a/tests/integration/targets/homebrew/tasks/casks.yml 
b/tests/integration/targets/homebrew/tasks/casks.yml index ffbe67d158..50824a9e9f 100644 --- a/tests/integration/targets/homebrew/tasks/casks.yml +++ b/tests/integration/targets/homebrew/tasks/casks.yml @@ -55,7 +55,7 @@ - assert: that: - - package_result.changed + - package_result is changed - name: Again install {{ package_name }} package using homebrew homebrew: @@ -68,7 +68,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - name: Uninstall {{ package_name }} package using homebrew homebrew: @@ -81,7 +81,7 @@ - assert: that: - - package_result.changed + - package_result is changed - name: Again uninstall {{ package_name }} package using homebrew homebrew: @@ -94,4 +94,4 @@ - assert: that: - - not package_result.changed + - package_result is not changed diff --git a/tests/integration/targets/homebrew/tasks/formulae.yml b/tests/integration/targets/homebrew/tasks/formulae.yml index 21276e3a2e..1559ba5dd8 100644 --- a/tests/integration/targets/homebrew/tasks/formulae.yml +++ b/tests/integration/targets/homebrew/tasks/formulae.yml @@ -55,7 +55,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Package installed: gnu-tar'" - "package_result.changed_pkgs == ['gnu-tar']" - "package_result.unchanged_pkgs == []" @@ -71,7 +71,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Package already installed: gnu-tar'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs == ['gnu-tar']" @@ -87,7 +87,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Package unlinked: gnu-tar'" - "package_result.changed_pkgs == ['gnu-tar']" - "package_result.unchanged_pkgs == []" @@ -103,7 +103,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Package linked: gnu-tar'" - "package_result.changed_pkgs == ['gnu-tar']" - 
"package_result.unchanged_pkgs == []" @@ -119,7 +119,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Package uninstalled: gnu-tar'" - "package_result.changed_pkgs == ['gnu-tar']" - "package_result.unchanged_pkgs == []" @@ -135,7 +135,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Package already uninstalled: gnu-tar'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs == ['gnu-tar']" @@ -151,7 +151,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Package upgraded: gnu-tar'" - "package_result.changed_pkgs == ['gnu-tar']" - "package_result.unchanged_pkgs == []" @@ -167,7 +167,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Package already upgraded: gnu-tar'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs == ['gnu-tar']" @@ -205,7 +205,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Changed: 1, Unchanged: 1'" - "package_result.changed_pkgs == ['gnu-time']" - "package_result.unchanged_pkgs == ['gnu-tar']" @@ -221,7 +221,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Changed: 0, Unchanged: 2'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" @@ -237,7 +237,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Changed: 2, Unchanged: 0'" - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" - "package_result.unchanged_pkgs == []" @@ -253,7 +253,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Changed: 2, Unchanged: 0'" - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" - 
"package_result.unchanged_pkgs == []" @@ -269,7 +269,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Changed: 2, Unchanged: 0'" - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" - "package_result.unchanged_pkgs == []" @@ -285,7 +285,7 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Changed: 0, Unchanged: 2'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" @@ -301,7 +301,7 @@ - assert: that: - - package_result.changed + - package_result is changed - "package_result.msg == 'Changed: 2, Unchanged: 0'" - "package_result.changed_pkgs | sort == ['gnu-tar', 'gnu-time']" - "package_result.unchanged_pkgs == []" @@ -317,7 +317,49 @@ - assert: that: - - not package_result.changed + - package_result is not changed - "package_result.msg == 'Changed: 0, Unchanged: 2'" - "package_result.changed_pkgs == []" - "package_result.unchanged_pkgs | sort == ['gnu-tar', 'gnu-time']" + +# Test alias handling with sqlite (that is aliased to sqlite3) +- block: + - name: Make sure sqlite package is not installed + homebrew: + name: "sqlite" + state: absent + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + + - name: Install sqlite package using alias (sqlite3) + homebrew: + name: "sqlite3" + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: install_result + + - assert: + that: + - install_result is changed + - "install_result.msg == 'Package installed: sqlite3'" + - "install_result.changed_pkgs == ['sqlite3']" + - "install_result.unchanged_pkgs == []" + + - name: Again install sqlite package using alias (sqlite3) + homebrew: + name: "sqlite3" + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + register: reinstall_result + + - assert: + that: + - 
reinstall_result is not changed + - "reinstall_result.msg == 'Package already installed: sqlite3'" + - "reinstall_result.changed_pkgs == []" + - "reinstall_result.unchanged_pkgs == ['sqlite3']" From 2682ec47d9edf21a1ea4ed99a81c00cf857e636a Mon Sep 17 00:00:00 2001 From: Florian Apolloner Date: Fri, 20 Dec 2024 22:54:15 +0100 Subject: [PATCH 377/482] keycloak_authentication: Fix priority attribute during execution updates (#9263) keycloak_authentication: Fix priority attribute during execution updates. --- .../9263-kc_authentication-api-priority.yaml | 2 ++ plugins/modules/keycloak_authentication.py | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/9263-kc_authentication-api-priority.yaml diff --git a/changelogs/fragments/9263-kc_authentication-api-priority.yaml b/changelogs/fragments/9263-kc_authentication-api-priority.yaml new file mode 100644 index 0000000000..a943e659ad --- /dev/null +++ b/changelogs/fragments/9263-kc_authentication-api-priority.yaml @@ -0,0 +1,2 @@ +security_fixes: + - keycloak_authentication - API calls did not properly set the ``priority`` during update resulting in incorrectly sorted authentication flows. This apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263). 
\ No newline at end of file diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py index bc2898d9be..5945890bb7 100644 --- a/plugins/modules/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -257,6 +257,7 @@ def create_or_update_executions(kc, config, realm='master'): changed = False after = "" before = "" + execution = None if "authenticationExecutions" in config: # Get existing executions on the Keycloak server for this alias existing_executions = kc.get_executions_representation(config, realm=realm) @@ -283,27 +284,27 @@ def create_or_update_executions(kc, config, realm='master'): if new_exec['index'] is None: new_exec_index = exec_index before += str(existing_executions[exec_index]) + '\n' - id_to_update = existing_executions[exec_index]["id"] + execution = existing_executions[exec_index].copy() # Remove exec from list in case 2 exec with same name existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' elif new_exec["displayName"] is not None: kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"]) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' if exec_found: changed = True if exec_index != -1: # Update the existing execution updated_exec = { - "id": id_to_update + "id": execution["id"] } # add the execution configuration if new_exec["authenticationConfig"] is not None: @@ -313,6 +314,8 @@ def 
create_or_update_executions(kc, config, realm='master'): if key not in ("flowAlias", "authenticationConfig", "subFlowType"): updated_exec[key] = new_exec[key] if new_exec["requirement"] is not None: + if "priority" in execution: + updated_exec["priority"] = execution["priority"] kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) diff = exec_index - new_exec_index kc.change_execution_priority(updated_exec["id"], diff, realm=realm) From 2b2872f0efe7f2fcf2c8c0e2b40764d940c79dff Mon Sep 17 00:00:00 2001 From: Stanislav Shamilov Date: Fri, 20 Dec 2024 23:55:12 +0200 Subject: [PATCH 378/482] Add android sdk module (#9236) * adds simple implementation of adding and removing android sdk packages * adds package update * adds simple installed packages parsing * moves parsing logic to a separate class * adds absent state for sdkmanager packages and setup for tests * adds output for installing and removing packages * removes version from Package object since it is not possible to specify version for a package while using sdkmanager * adds 'latest' state * adds tests * fixes crash when sdkmanager is invoked from python with LC_ALL=C * fixes latest state * adds sdk_root parameter * adds channel parameter * simplifies regexps, removes unused named groups * minor refactoring of sdkmanager parsing * adds java dependency variable for different distributions * adds RETURN documentation * adds check for nonexisting package * adds check for non-accepted licenses * removes excessive methods from sdkmanager * removes unused 'update' module parameter, packages may be updated using 'latest' state * minor refactoring * adds EXAMPLES doc section * adds DOCUMENTATION section and license headers * fixes formatting issues * removes diff_params * adds maintainer * fixes sanity check issues in sdkmanager * adds java dependency for macos and moves some tests to a separate FreeBSD configuration * fixes dependencies setup for OSX * fixes dependencies setup for OSX (2) 
* fixes dependencies setup for OSX (3) * Apply minor suggestions from code review Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * applies code review suggestions * changes force_lang from C.UTF-8 to auto in sdkmanager (as per discussion https://github.com/ansible-collections/community.general/pull/9236#discussion_r1881114326) * Revert "changes force_lang from C.UTF-8 to auto in sdkmanager (as per discussion https://github.com/ansible-collections/community.general/pull/9236#discussion_r1881114326)" This reverts commit 619f28dd58db005e466a19b98604221da82b7ecc. * fixes some more comments from review * minor sanity issue fix * uses the 'changed' test instead of checking the 'changed' attribute * adds 'accept_licenses' parameter. Installation is now performed independently for each package specified. * removes "Accept licenses" task from examples * fixes docs sanity issues * applies minor suggestions from code review * fixes regexps. The previous version didn't match versions like "32.1.0 rc1". Also, this allows to simplify the parsing logic as there is no need to skip table headers anymore. 
* renamed sdkmanager.py to android_sdkmanager.py * applies minor suggestions from code review Co-authored-by: Felix Fontein * updates BOTMETA * reordered BOTMETA --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 4 + plugins/module_utils/android_sdkmanager.py | 148 ++++++++++++ plugins/modules/android_sdk.py | 213 ++++++++++++++++++ tests/integration/targets/android_sdk/aliases | 7 + .../targets/android_sdk/meta/main.yml | 8 + .../android_sdk/tasks/default-tests.yml | 92 ++++++++ .../android_sdk/tasks/freebsd-tests.yml | 72 ++++++ .../targets/android_sdk/tasks/main.yml | 31 +++ .../targets/android_sdk/tasks/setup.yml | 86 +++++++ .../targets/android_sdk/vars/Alpine.yml | 6 + .../targets/android_sdk/vars/Archlinux.yml | 6 + .../targets/android_sdk/vars/Darwin.yml | 6 + .../targets/android_sdk/vars/Debian.yml | 6 + .../targets/android_sdk/vars/FreeBSD.yml | 6 + .../targets/android_sdk/vars/RedHat.yml | 6 + .../targets/android_sdk/vars/Suse.yml | 6 + .../targets/android_sdk/vars/main.yml | 8 + 17 files changed, 711 insertions(+) create mode 100644 plugins/module_utils/android_sdkmanager.py create mode 100644 plugins/modules/android_sdk.py create mode 100644 tests/integration/targets/android_sdk/aliases create mode 100644 tests/integration/targets/android_sdk/meta/main.yml create mode 100644 tests/integration/targets/android_sdk/tasks/default-tests.yml create mode 100644 tests/integration/targets/android_sdk/tasks/freebsd-tests.yml create mode 100644 tests/integration/targets/android_sdk/tasks/main.yml create mode 100644 tests/integration/targets/android_sdk/tasks/setup.yml create mode 100644 tests/integration/targets/android_sdk/vars/Alpine.yml create mode 100644 tests/integration/targets/android_sdk/vars/Archlinux.yml create mode 100644 tests/integration/targets/android_sdk/vars/Darwin.yml create mode 100644 tests/integration/targets/android_sdk/vars/Debian.yml create mode 100644 
tests/integration/targets/android_sdk/vars/FreeBSD.yml create mode 100644 tests/integration/targets/android_sdk/vars/RedHat.yml create mode 100644 tests/integration/targets/android_sdk/vars/Suse.yml create mode 100644 tests/integration/targets/android_sdk/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6896106906..2be4619ecb 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -309,6 +309,8 @@ files: maintainers: delineaKrehl tylerezimmerman $module_utils/: labels: module_utils + $module_utils/android_sdkmanager.py: + maintainers: shamilovstas $module_utils/btrfs.py: maintainers: gnfzdz $module_utils/cmd_runner_fmt.py: @@ -420,6 +422,8 @@ files: ignore: DavidWittman jiuka labels: alternatives maintainers: mulby + $modules/android_sdk.py: + maintainers: shamilovstas $modules/ansible_galaxy_install.py: maintainers: russoz $modules/apache2_mod_proxy.py: diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py new file mode 100644 index 0000000000..9cbb2df6b0 --- /dev/null +++ b/plugins/module_utils/android_sdkmanager.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +__state_map = { + "present": "--install", + "absent": "--uninstall" +} + +# sdkmanager --help 2>&1 | grep -A 2 -- --channel +__channel_map = { + "stable": 0, + "beta": 1, + "dev": 2, + "canary": 3 +} + + +def __map_channel(channel_name): + if channel_name not in __channel_map: + raise ValueError("Unknown channel name '%s'" % channel_name) + return __channel_map[channel_name] + + +def sdkmanager_runner(module, 
**kwargs): + return CmdRunner( + module, + command='sdkmanager', + arg_formats=dict( + state=cmd_runner_fmt.as_map(__state_map), + name=cmd_runner_fmt.as_list(), + installed=cmd_runner_fmt.as_fixed("--list_installed"), + list=cmd_runner_fmt.as_fixed('--list'), + newer=cmd_runner_fmt.as_fixed("--newer"), + sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"), + channel=cmd_runner_fmt.as_func(lambda x: ["{0}={1}".format("--channel", __map_channel(x))]) + ), + force_lang="C.UTF-8", # Without this, sdkmanager binary crashes + **kwargs + ) + + +class Package: + def __init__(self, name): + self.name = name + + def __hash__(self): + return hash(self.name) + + def __ne__(self, other): + if not isinstance(other, Package): + return True + return self.name != other.name + + def __eq__(self, other): + if not isinstance(other, Package): + return False + + return self.name == other.name + + +class SdkManagerException(Exception): + pass + + +class AndroidSdkManager(object): + _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$') + _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$') + + # Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools ' + _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$') + + # Example: ' platform-tools | 27.0.0 | 35.0.2' + _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$') + + _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P<package>\S+)\'\s*$') + _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of ' + r'the packages they depend on were not accepted') + + def __init__(self, module): + self.runner = sdkmanager_runner(module) + + def get_installed_packages(self): + with self.runner('installed sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, 
self._RE_INSTALLED_PACKAGE) + + def get_updatable_packages(self): + with self.runner('list newer sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE) + + def apply_packages_changes(self, packages, accept_licenses=False): + """ Install or delete packages, depending on the `module.vars.state` parameter """ + if len(packages) == 0: + return 0, '', '' + + if accept_licenses: + license_prompt_answer = 'y' + else: + license_prompt_answer = 'N' + for package in packages: + with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx: + rc, stdout, stderr = ctx.run(name=package.name) + + for line in stdout.splitlines(): + if self._RE_ACCEPT_LICENSE.match(line): + raise SdkManagerException("Licenses for some packages were not accepted") + + if rc != 0: + self._try_parse_stderr(stderr) + return rc, stdout, stderr + return 0, '', '' + + def _try_parse_stderr(self, stderr): + data = stderr.splitlines() + for line in data: + unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line) + if unknown_package_regex: + package = unknown_package_regex.group('package') + raise SdkManagerException("Unknown package %s" % package) + + @staticmethod + def _parse_packages(stdout, header_regexp, row_regexp): + data = stdout.splitlines() + + section_found = False + packages = set() + + for line in data: + if not section_found: + section_found = header_regexp.match(line) + continue + else: + p = row_regexp.match(line) + if p: + packages.add(Package(p.group('name'))) + return packages diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py new file mode 100644 index 0000000000..9851a84fc2 --- /dev/null +++ b/plugins/modules/android_sdk.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: android_sdk +short_description: Manages Android SDK packages +description: + - Manages Android SDK packages. + - Allows installation from different channels (stable, beta, dev, canary). + - Allows installation of packages to a non-default SDK root directory. +author: Stanislav Shamilov (@shamilovstas) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +version_added: 10.2.0 +options: + accept_licenses: + description: + - If this is set to B(true), the module will try to accept license prompts generated by C(sdkmanager) during + package installation. Otherwise, every license prompt will be rejected. + type: bool + default: false + name: + description: + - A name of an Android SDK package (for instance, V(build-tools;34.0.0)). + aliases: ['package', 'pkg'] + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - V(present) ensures that package(s) is/are present. + - V(absent) ensures that package(s) is/are absent. + - V(latest) ensures that package(s) is/are installed and updated to the latest version(s). + choices: ['present', 'absent', 'latest'] + default: present + type: str + sdk_root: + description: + - Provides path for an alternative directory to install Android SDK packages to. By default, all packages + are installed to the directory where C(sdkmanager) is installed. + type: path + channel: + description: + - Indicates what channel must C(sdkmanager) use for installation of packages. + choices: ['stable', 'beta', 'dev', 'canary'] + default: stable + type: str +requirements: + - C(java) >= 17 + - C(sdkmanager) Command line tool for installing Android SDK packages. 
+notes: + - For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through + command line prompt in a form of a Y/N question when a licensed package is requested to be installed. If there are + several packages requested for installation and at least two of them belong to different licenses, the C(sdkmanager) + tool will prompt for these licenses in a loop. + In order to install packages, the module must be able to answer these license prompts. Currently, it is only + possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a single + invocation of the C(sdkmanager --install) command, it will be done by executing the command independently for each + package. This makes sure that at most only one license prompt will need to be answered. + At the time of writing this module, a C(sdkmanager)'s package may belong to at most one license type that needs to + be accepted. However, if this is changes in the future, the module may hang as there might be more prompts generated + by the C(sdkmanager) tool which the module will not be able to answer. If this is the case, file an issue and in the + meantime, consider accepting all the licenses in advance, as it is described in the C(sdkmanager) + L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance, using the + M(ansible.builtin.command) module. +seealso: + - name: sdkmanager tool documentation + description: Detailed information of how to install and use sdkmanager command line tool. 
+ link: https://developer.android.com/tools/sdkmanager +''' + +EXAMPLES = r''' +- name: Install build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + +- name: Install build-tools;34.0.0 and platform-tools + community.general.android_sdk: + name: + - build-tools;34.0.0 + - platform-tools + accept_licenses: true + state: present + +- name: Delete build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + state: absent + +- name: Install platform-tools or update if installed + community.general.android_sdk: + name: platform-tools + accept_licenses: true + state: latest + +- name: Install build-tools;34.0.0 to a different SDK root + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + sdk_root: "/path/to/new/root" + +- name: Install a package from another channel + community.general.android_sdk: + name: some-package-present-in-canary-channel + accept_licenses: true + state: present + channel: canary +''' + +RETURN = r''' +installed: + description: a list of packages that have been installed + returned: when packages have changed + type: list + sample: ['build-tools;34.0.0', 'platform-tools'] + +removed: + description: a list of packages that have been removed + returned: when packages have changed + type: list + sample: ['build-tools;34.0.0', 'platform-tools'] +''' + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.android_sdkmanager import Package, AndroidSdkManager + + +class AndroidSdk(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'latest']), + package=dict(type='list', elements='str', aliases=['pkg', 'name']), + sdk_root=dict(type='path'), + channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 
'canary']), + accept_licenses=dict(type='bool', default=False) + ), + supports_check_mode=True + ) + use_old_vardict = False + + def __init_module__(self): + self.sdkmanager = AndroidSdkManager(self.module) + self.vars.set('installed', [], change=True) + self.vars.set('removed', [], change=True) + + def _parse_packages(self): + arg_pkgs = set(self.vars.package) + if len(arg_pkgs) < len(self.vars.package): + self.do_raise("Packages may not repeat") + return set([Package(p) for p in arg_pkgs]) + + def state_present(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + pending_installation = packages.difference(installed) + + self.vars.installed = AndroidSdk._map_packages_to_names(pending_installation) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(pending_installation, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + def state_absent(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + to_be_deleted = packages.intersection(installed) + self.vars.removed = AndroidSdk._map_packages_to_names(to_be_deleted) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_deleted) + if rc != 0: + self.do_raise("Could not uninstall packages: %s" % stderr) + + def state_latest(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + updatable = self.sdkmanager.get_updatable_packages() + not_installed = packages.difference(installed) + to_be_installed = not_installed.union(updatable) + self.vars.installed = AndroidSdk._map_packages_to_names(to_be_installed) + + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_installed, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + @staticmethod + def _map_packages_to_names(packages): + 
return [x.name for x in packages] + + +def main(): + AndroidSdk.execute() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/android_sdk/aliases b/tests/integration/targets/android_sdk/aliases new file mode 100644 index 0000000000..bb79889366 --- /dev/null +++ b/tests/integration/targets/android_sdk/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +destructive +needs/root \ No newline at end of file diff --git a/tests/integration/targets/android_sdk/meta/main.yml b/tests/integration/targets/android_sdk/meta/main.yml new file mode 100644 index 0000000000..d7c152feeb --- /dev/null +++ b/tests/integration/targets/android_sdk/meta/main.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_pkg_mgr + - setup_remote_tmp_dir \ No newline at end of file diff --git a/tests/integration/targets/android_sdk/tasks/default-tests.yml b/tests/integration/targets/android_sdk/tasks/default-tests.yml new file mode 100644 index 0000000000..b8cb6df54d --- /dev/null +++ b/tests/integration/targets/android_sdk/tasks/default-tests.yml @@ -0,0 +1,92 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install build-tools;34.0.0 + android_sdk: + accept_licenses: true 
+ name: build-tools;34.0.0 + state: present + register: build_tools_installed + +- name: Install build-tools;34.0.0 second time + android_sdk: + name: build-tools;34.0.0 + state: present + register: build_tools_installed2 + +- name: Stat build-tools + stat: + path: "{{ android_sdk_location }}/build-tools/34.0.0" + register: build_tools_34_0_0 + +- name: Delete build-tools;34.0.0 + android_sdk: + name: build-tools;34.0.0 + state: absent + register: build_tools_deleted + +- name: Delete build-tools;34.0.0 second time + android_sdk: + name: build-tools;34.0.0 + state: absent + register: build_tools_deleted2 + +- name: Download old platform-tools + unarchive: + src: https://dl.google.com/android/repository/platform-tools_r27.0.0-linux.zip + remote_src: true + dest: "{{ android_sdk_location }}" + +- name: Try installing platform-tools from sdkmanager + android_sdk: + name: platform-tools + accept_licenses: true + state: present + register: platform_tools_present + +- name: Install (update) platform-tools + android_sdk: + name: platform-tools + state: latest + register: platform_tools_updated + +- name: Install a package to a new root + android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + sdk_root: "{{ remote_tmp_dir }}" + register: new_root_package + +- name: Check package is installed + stat: + path: "{{ remote_tmp_dir }}/build-tools/34.0.0" + register: new_root_package_stat + +- name: Install a package from canary channel + android_sdk: + name: build-tools;33.0.0 + state: present + channel: canary + register: package_canary + +- name: Run tests + assert: + that: + - build_tools_34_0_0.stat.exists + - build_tools_installed is changed + - build_tools_installed2 is not changed + - build_tools_deleted is changed + - build_tools_deleted2 is not changed + - platform_tools_present is not changed + - platform_tools_updated is changed + - new_root_package is changed + - new_root_package_stat.stat.exists + - package_canary is changed \ No newline 
at end of file diff --git a/tests/integration/targets/android_sdk/tasks/freebsd-tests.yml b/tests/integration/targets/android_sdk/tasks/freebsd-tests.yml new file mode 100644 index 0000000000..f1886f245d --- /dev/null +++ b/tests/integration/targets/android_sdk/tasks/freebsd-tests.yml @@ -0,0 +1,72 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install sources;android-26 (FreeBSD) + android_sdk: + name: sources;android-26 + accept_licenses: true + state: present + register: sources_android_26_installed + +- name: Install sources;android-26 (FreeBSD) + android_sdk: + name: sources;android-26 + state: present + register: sources_android_26_installed2 + +- name: Stat build-tools (FreeBSD) + stat: + path: "{{ android_sdk_location }}/sources/android-26" + register: sources_android_26 + +- name: Delete sources;android-26 (FreeBSD) + android_sdk: + name: sources;android-26 + state: absent + register: sources_android_26_deleted + +- name: Delete sources;android-26 second time (FreeBSD) + android_sdk: + name: sources;android-26 + state: absent + register: sources_android_26_deleted2 + +- name: Install a package to a new root (FreeBSD) + android_sdk: + name: sources;android-26 + accept_licenses: true + state: present + sdk_root: "{{ remote_tmp_dir }}" + register: new_root_package + +- name: Check package is installed (FreeBSD) + stat: + path: "{{ remote_tmp_dir }}/sources/android-26" + register: new_root_package_stat + +- name: Install a package from canary channel (FreeBSD) + android_sdk: + name: sources;android-26 + accept_licenses: true + 
state: present + channel: canary + register: package_canary + +- name: Run tests (FreeBSD) + assert: + that: + - sources_android_26.stat.exists + - sources_android_26_installed is changed + - sources_android_26_installed2 is not changed + - sources_android_26_deleted is changed + - sources_android_26_deleted2 is not changed + - new_root_package is changed + - new_root_package_stat.stat.exists + - package_canary is changed diff --git a/tests/integration/targets/android_sdk/tasks/main.yml b/tests/integration/targets/android_sdk/tasks/main.yml new file mode 100644 index 0000000000..46cf3192e1 --- /dev/null +++ b/tests/integration/targets/android_sdk/tasks/main.yml @@ -0,0 +1,31 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# java >= 17 is not available in RHEL and CentOS7 repos, which is required for sdkmanager to run +- name: Bail out if not supported + when: + - "ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<')" + ansible.builtin.meta: end_play + +- name: Run android_sdk tests + environment: + PATH: '{{ ansible_env.PATH }}:{{ android_sdk_location }}/cmdline-tools/latest/bin' + block: + - import_tasks: setup.yml + + - name: Run default tests + import_tasks: default-tests.yml + when: ansible_os_family != 'FreeBSD' + + # Most of the important Android SDK packages are not available on FreeBSD (like, build-tools, platform-tools and so on), + # but at least some of the functionality can be tested (like, downloading sources) + - name: Run FreeBSD tests + import_tasks: freebsd-tests.yml + when: 
ansible_os_family == 'FreeBSD' diff --git a/tests/integration/targets/android_sdk/tasks/setup.yml b/tests/integration/targets/android_sdk/tasks/setup.yml new file mode 100644 index 0000000000..ff2e3eb3cf --- /dev/null +++ b/tests/integration/targets/android_sdk/tasks/setup.yml @@ -0,0 +1,86 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Include OS-specific variables + include_vars: '{{ ansible_os_family }}.yml' + +- name: Install dependencies + become: true + package: + name: + - "{{ openjdk_pkg }}" + - unzip + state: present + when: ansible_os_family != 'Darwin' + +- name: Install dependencies (OSX) + block: + - name: Find brew binary + command: which brew + register: brew_which + - name: Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + - name: "Install package" + homebrew: + name: + - "{{ openjdk_pkg }}" + - unzip + state: present + update_homebrew: false + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + environment: + HOMEBREW_NO_AUTO_UPDATE: "True" + - name: Symlink java + become: true + file: + src: "/usr/local/opt/openjdk@17/libexec/openjdk.jdk" + dest: "/Library/Java/JavaVirtualMachines/openjdk-17.jdk" + state: link + when: + - ansible_os_family == 'Darwin' + +- name: Create Android SDK directory + file: + path: "{{ android_sdk_location }}" + state: directory + +- name: Check that sdkmanager is installed + stat: + path: "{{ android_sdk_location }}/cmdline-tools/latest/bin/sdkmanager" + register: sdkmanager_installed + +- name: Install Android command line tools + when: 
not sdkmanager_installed.stat.exists + block: + - name: Create Android SDK dir structure + file: + path: "{{ item.path }}" + state: "{{ item.state }}" + with_items: + - { path: "{{ android_cmdline_temp_dir }}", state: "directory" } + - { path: "{{ android_sdk_location }}/cmdline-tools/latest", state: "directory" } + + - name: Download Android command line tools + unarchive: + src: "{{ commandline_tools_link }}" + dest: "{{ android_cmdline_temp_dir }}" + remote_src: yes + creates: "{{ android_cmdline_temp_dir }}/cmdline-tools" + when: not sdkmanager_installed.stat.exists + + + - name: Fix directory structure + copy: + src: "{{ android_cmdline_temp_dir }}/cmdline-tools/" + dest: "{{ android_sdk_location }}/cmdline-tools/latest" + remote_src: yes diff --git a/tests/integration/targets/android_sdk/vars/Alpine.yml b/tests/integration/targets/android_sdk/vars/Alpine.yml new file mode 100644 index 0000000000..593925f043 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/Alpine.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: openjdk17-jre-headless diff --git a/tests/integration/targets/android_sdk/vars/Archlinux.yml b/tests/integration/targets/android_sdk/vars/Archlinux.yml new file mode 100644 index 0000000000..ff46870671 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/Archlinux.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: jre17-openjdk-headless diff --git a/tests/integration/targets/android_sdk/vars/Darwin.yml b/tests/integration/targets/android_sdk/vars/Darwin.yml new file mode 100644 index 0000000000..696bf39a75 --- /dev/null +++ 
b/tests/integration/targets/android_sdk/vars/Darwin.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: openjdk@17 diff --git a/tests/integration/targets/android_sdk/vars/Debian.yml b/tests/integration/targets/android_sdk/vars/Debian.yml new file mode 100644 index 0000000000..ddcfaaf1e3 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/Debian.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: openjdk-17-jre-headless diff --git a/tests/integration/targets/android_sdk/vars/FreeBSD.yml b/tests/integration/targets/android_sdk/vars/FreeBSD.yml new file mode 100644 index 0000000000..61c1858423 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/FreeBSD.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: openjdk17-jre diff --git a/tests/integration/targets/android_sdk/vars/RedHat.yml b/tests/integration/targets/android_sdk/vars/RedHat.yml new file mode 100644 index 0000000000..40f44bd773 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/RedHat.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: java-17-openjdk-headless diff --git a/tests/integration/targets/android_sdk/vars/Suse.yml b/tests/integration/targets/android_sdk/vars/Suse.yml new file mode 100644 index 0000000000..40f44bd773 --- /dev/null +++ 
b/tests/integration/targets/android_sdk/vars/Suse.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: java-17-openjdk-headless diff --git a/tests/integration/targets/android_sdk/vars/main.yml b/tests/integration/targets/android_sdk/vars/main.yml new file mode 100644 index 0000000000..9ba619a6d5 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/main.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +android_cmdline_temp_dir: "/tmp/cmdlinetools" +android_sdk_location: "/tmp/androidsdk" +commandline_tools_link: https://dl.google.com/android/repository/commandlinetools-linux-11076708_latest.zip From d2088ccfcc22e3893b1a3507dda4918573ddd9ef Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 21 Dec 2024 16:49:23 +0100 Subject: [PATCH 379/482] Polish botmeta extra sanity test and make it work without warnings on Python 3.13 (#9297) Polish botmeta and make it work without warnings on Python 3.13. 
--- tests/sanity/extra/botmeta.py | 76 +++++++++++++++++------------------ 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index 07ca189e81..9f7e977ea2 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -3,10 +3,9 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later """Check BOTMETA file.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -import ast +from __future__ import annotations + import os import re import sys @@ -65,26 +64,27 @@ AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])') def read_authors(filename): data = {} try: - with open(filename, 'rb') as b_module_data: - M = ast.parse(b_module_data.read()) + documentation = [] + in_docs = False + with open(filename, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith('DOCUMENTATION ='): + in_docs = True + elif line.startswith(("'''", '"""')) and in_docs: + in_docs = False + elif in_docs: + documentation.append(line) + if in_docs: + print(f'{filename}: cannot find DOCUMENTATION end') + return [] + if not documentation: + print(f'{filename}: cannot find DOCUMENTATION') + return [] - for child in M.body: - if isinstance(child, ast.Assign): - for t in child.targets: - try: - theid = t.id - except AttributeError: - # skip errors can happen when trying to use the normal code - continue - - if theid == 'DOCUMENTATION': - if isinstance(child.value, ast.Dict): - data = ast.literal_eval(child.value) - else: - data = yaml.safe_load(child.value.s) + data = yaml.safe_load('\n'.join(documentation)) except Exception as e: - print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e)) + print(f'{filename}:0:0: Cannot load DOCUMENTATION: {e}') return [] author = data.get('author') or [] @@ -107,21 +107,21 @@ def validate(filename, 
filedata): return if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')): return - # Compile lis tof all active and inactive maintainers + # Compile list of all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] - if not filename.startswith('plugins/filter/'): + if not filename.startswith(('plugins/action/', 'plugins/doc_fragments/', 'plugins/filter/', 'plugins/module_utils/', 'plugins/plugin_utils/')): maintainers = read_authors(filename) for maintainer in maintainers: maintainer = extract_author_name(maintainer) if maintainer is not None and maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + others = ', '.join(all_maintainers) + msg = f'Author {maintainer} not mentioned as active or inactive maintainer for {filename} (mentioned are: {others})' + print(f'{FILENAME}:0:0: {msg}') should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS if not all_maintainers and not should_have_no_maintainer: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + print(f'{FILENAME}:0:0: No (active or inactive) maintainer mentioned for {filename}') if all_maintainers and should_have_no_maintainer: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0]))) + print(f'{FILENAME}:0:0: Please remove {filename} from the ignore list of {sys.argv[0]}') def main(): @@ -130,12 +130,12 @@ def main(): with open(FILENAME, 'rb') as f: botmeta = yaml.safe_load(f) except yaml.error.MarkedYAMLError as ex: - print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line + - 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + msg = re.sub(r'\s+', ' ', str(ex)) + print(f'{FILENAME}:{ex.context_mark.line + 
1}:{ex.context_mark.column + 1}: YAML load failed: {msg}') return except Exception as ex: # pylint: disable=broad-except - print('%s:%d:%d: YAML load failed: %s' % - (FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + msg = re.sub(r'\s+', ' ', str(ex)) + print(f'{FILENAME}:0:0: YAML load failed: {msg}') return # Validate schema @@ -168,7 +168,7 @@ def main(): except MultipleInvalid as ex: for error in ex.errors: # No way to get line/column numbers - print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error))) + print(f'{FILENAME}:0:0: {humanize_error(botmeta, error)}') return # Preprocess (substitute macros, convert to lists) @@ -180,7 +180,7 @@ def main(): macro = m.group(1) replacement = (macros[macro] or '') if macro == 'team_ansible_core': - return '$team_ansible_core %s' % replacement + return f'$team_ansible_core {replacement}' return replacement return macro_re.sub(f, text) @@ -195,13 +195,13 @@ def main(): if k in LIST_ENTRIES: filedata[k] = v.split() except KeyError as e: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro %s' % e)) + print(f'{FILENAME}:0:0: Found unknown macro {e}') return # Scan all files unmatched = set(files) for dirs in ('docs/docsite/rst', 'plugins', 'tests', 'changelogs'): - for dirpath, dirnames, filenames in os.walk(dirs): + for dirpath, _dirnames, filenames in os.walk(dirs): for file in sorted(filenames): if file.endswith('.pyc'): continue @@ -216,10 +216,10 @@ def main(): if file in unmatched: unmatched.remove(file) if not matching_files: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) + print(f'{FILENAME}:0:0: Did not find any entry for {filename}') matching_files.sort(key=lambda kv: kv[0]) - filedata = dict() + filedata = {} for k in LIST_ENTRIES: filedata[k] = [] for dummy, data in matching_files: @@ -230,7 +230,7 @@ def main(): validate(filename, filedata) for file in unmatched: - 
print(f'{FILENAME}:0:0: Entry {file} was not used') if __name__ == '__main__': From 1ee244f02dbf85f5348a9b03007f78446d28c76a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 22 Dec 2024 05:03:12 +1300 Subject: [PATCH 380/482] fix examples indentation (#9295) --- plugins/modules/yarn.py | 58 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/plugins/modules/yarn.py b/plugins/modules/yarn.py index a2a3c471b7..7109145ce8 100644 --- a/plugins/modules/yarn.py +++ b/plugins/modules/yarn.py @@ -88,41 +88,41 @@ requirements: """ EXAMPLES = r""" - - name: Install "imagemin" Node.js package. - community.general.yarn: - name: imagemin - path: /app/location +- name: Install "imagemin" Node.js package. + community.general.yarn: + name: imagemin + path: /app/location - - name: Install "imagemin" Node.js package on version 5.3.1 - community.general.yarn: - name: imagemin - version: '5.3.1' - path: /app/location +- name: Install "imagemin" Node.js package on version 5.3.1 + community.general.yarn: + name: imagemin + version: '5.3.1' + path: /app/location - - name: Install "imagemin" Node.js package globally. - community.general.yarn: - name: imagemin - global: true +- name: Install "imagemin" Node.js package globally. + community.general.yarn: + name: imagemin + global: true - - name: Remove the globally-installed package "imagemin". - community.general.yarn: - name: imagemin - global: true - state: absent +- name: Remove the globally-installed package "imagemin". + community.general.yarn: + name: imagemin + global: true + state: absent - - name: Install "imagemin" Node.js package from custom registry. - community.general.yarn: - name: imagemin - registry: 'http://registry.mysite.com' +- name: Install "imagemin" Node.js package from custom registry. 
+ community.general.yarn: + name: imagemin + registry: 'http://registry.mysite.com' - - name: Install packages based on package.json. - community.general.yarn: - path: /app/location +- name: Install packages based on package.json. + community.general.yarn: + path: /app/location - - name: Update all packages in package.json to their latest version. - community.general.yarn: - path: /app/location - state: latest +- name: Update all packages in package.json to their latest version. + community.general.yarn: + path: /app/location + state: latest """ RETURN = r""" From afa5716e0baf9ce428216cb50b30f8c125c153ab Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 22 Dec 2024 05:03:23 +1300 Subject: [PATCH 381/482] yum_versionlock: adjust docs (#9280) * yum_versionlock: adjust docs * fix examples indentation --- plugins/modules/yum_versionlock.py | 59 +++++++++++++++--------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/plugins/modules/yum_versionlock.py b/plugins/modules/yum_versionlock.py index 0cbf9be393..4a618a9d17 100644 --- a/plugins/modules/yum_versionlock.py +++ b/plugins/modules/yum_versionlock.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: yum_versionlock version_added: 2.0.0 short_description: Locks / unlocks a installed package(s) from being updated by yum package manager @@ -32,62 +31,62 @@ options: elements: str state: description: - - If state is V(present), package(s) will be added to yum versionlock list. - - If state is V(absent), package(s) will be removed from yum versionlock list. - choices: [ 'absent', 'present' ] + - If state is V(present), package(s) will be added to yum versionlock list. + - If state is V(absent), package(s) will be removed from yum versionlock list. 
+ choices: ['absent', 'present'] type: str default: present notes: - - Requires yum-plugin-versionlock package on the remote node. + - Requires yum-plugin-versionlock package on the remote node. requirements: -- yum -- yum-versionlock + - yum + - yum-versionlock author: - - Florian Paul Azim Hoberg (@gyptazy) - - Amin Vakil (@aminvakil) -''' + - Florian Paul Azim Hoberg (@gyptazy) + - Amin Vakil (@aminvakil) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prevent Apache / httpd from being updated community.general.yum_versionlock: state: present name: - - httpd + - httpd - name: Prevent Apache / httpd version 2.4.57-2 from being updated community.general.yum_versionlock: state: present name: - - httpd-0:2.4.57-2.el9 + - httpd-0:2.4.57-2.el9 - name: Prevent multiple packages from being updated community.general.yum_versionlock: state: present name: - - httpd - - nginx - - haproxy - - curl + - httpd + - nginx + - haproxy + - curl - name: Remove lock from Apache / httpd to be updated again community.general.yum_versionlock: state: absent name: httpd -''' +""" -RETURN = r''' +RETURN = r""" packages: - description: A list of package(s) in versionlock list. - returned: success - type: list - elements: str - sample: [ 'httpd' ] + description: A list of package(s) in versionlock list. + returned: success + type: list + elements: str + sample: ['httpd'] state: - description: State of package(s). - returned: success - type: str - sample: present -''' + description: State of package(s). 
+ returned: success + type: str + sample: present +""" import re from ansible.module_utils.basic import AnsibleModule From 1b6c05176b0ce705eaf7d0c5d72a69ffd889a716 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 22 Dec 2024 11:42:54 +1300 Subject: [PATCH 382/482] zfs modules: adjust docs (#9281) * zfs modules: adjust docs * Apply suggestions from code review * fix examples indentation * Update plugins/modules/zfs.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/zfs.py | 30 ++-- plugins/modules/zfs_delegate_admin.py | 29 ++-- plugins/modules/zfs_facts.py | 230 +++++++++++++------------- 3 files changed, 140 insertions(+), 149 deletions(-) diff --git a/plugins/modules/zfs.py b/plugins/modules/zfs.py index f23cc4580d..1b00010d8a 100644 --- a/plugins/modules/zfs.py +++ b/plugins/modules/zfs.py @@ -9,23 +9,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zfs -short_description: Manage zfs +short_description: Manage ZFS description: - - Manages ZFS file systems, volumes, clones and snapshots + - Manages ZFS file systems, volumes, clones and snapshots. extends_documentation_fragment: - community.general.attributes attributes: check_mode: support: partial details: - - In certain situations it may report a task as changed that will not be reported - as changed when C(check_mode) is disabled. - - For example, this might occur when the zpool C(altroot) option is set or when - a size is written using human-readable notation, such as V(1M) or V(1024K), - instead of as an unqualified byte count, such as V(1048576). + - In certain situations it may report a task as changed that will not be reported as changed when C(check_mode) is disabled. 
+ - For example, this might occur when the zpool C(altroot) option is set or when a size is written using human-readable notation, such as + V(1M) or V(1024K), instead of as an unqualified byte count, such as V(1048576). diff_mode: support: full options: @@ -36,10 +33,9 @@ options: type: str state: description: - - Whether to create (V(present)), or remove (V(absent)) a - file system, snapshot or volume. All parents/children - will be created/destroyed as needed to reach the desired state. - choices: [ absent, present ] + - Whether to create (V(present)), or remove (V(absent)) a file system, snapshot or volume. All parents/children will be created/destroyed + as needed to reach the desired state. + choices: [absent, present] required: true type: str origin: @@ -53,10 +49,10 @@ options: type: dict default: {} author: -- Johan Wiren (@johanwiren) -''' + - Johan Wiren (@johanwiren) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new file system called myfs in pool rpool with the setuid property turned off community.general.zfs: name: rpool/myfs @@ -93,7 +89,7 @@ EXAMPLES = ''' community.general.zfs: name: rpool/myfs state: absent -''' +""" import os diff --git a/plugins/modules/zfs_delegate_admin.py b/plugins/modules/zfs_delegate_admin.py index 24f7422206..796cbd4595 100644 --- a/plugins/modules/zfs_delegate_admin.py +++ b/plugins/modules/zfs_delegate_admin.py @@ -8,18 +8,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: zfs_delegate_admin short_description: Manage ZFS delegated administration (user admin privileges) description: - - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS - operations normally restricted to the superuser. + - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS operations normally restricted + to the superuser. 
- See the C(zfs allow) section of V(zfs(1M\)) for detailed explanations of options. - This module attempts to adhere to the behavior of the command line tool as much as possible. requirements: - - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all - versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." + - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all versions), FreeBSD >= 8.0R, + ZFS on Linux >= 0.7.0." extends_documentation_fragment: - community.general.attributes attributes: @@ -38,7 +37,7 @@ options: - Whether to allow (V(present)), or unallow (V(absent)) a permission. - When set to V(present), at least one "entity" param of O(users), O(groups), or O(everyone) are required. - When set to V(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. - choices: [ absent, present ] + choices: [absent, present] default: present type: str users: @@ -59,8 +58,8 @@ options: permissions: description: - The list of permission(s) to delegate (required if O(state=present)). - - Supported permissions depend on the ZFS version in use. See for example - U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. + - Supported permissions depend on the ZFS version in use. See for example U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) + for OpenZFS. 
type: list elements: str local: @@ -77,10 +76,10 @@ options: type: bool default: false author: -- Nate Coraor (@natefoo) -''' + - Nate Coraor (@natefoo) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope community.general.zfs_delegate_admin: name: rpool/myfs @@ -106,12 +105,12 @@ EXAMPLES = r''' name: rpool/myfs everyone: true state: absent -''' +""" # This module does not return anything other than the standard # changed/state/msg/stdout -RETURN = ''' -''' +RETURN = r""" +""" from itertools import product diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py index bb4530c473..25fd10099b 100644 --- a/plugins/modules/zfs_facts.py +++ b/plugins/modules/zfs_facts.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zfs_facts short_description: Gather facts about ZFS datasets description: @@ -21,45 +20,42 @@ extends_documentation_fragment: - community.general.attributes.facts - community.general.attributes.facts_module options: - name: - description: - - ZFS dataset name. - required: true - aliases: [ "ds", "dataset" ] - type: str - recurse: - description: - - Specifies if properties for any children should be recursively - displayed. - type: bool - default: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zfs(1M) man page. - default: all - type: str - type: - description: - - Specifies which datasets types to display. Multiple values have to be - provided in comma-separated form. 
- choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] - default: all - type: str - depth: - description: - - Specifies recursion depth. - type: int - default: 0 -''' + name: + description: + - ZFS dataset name. + required: true + aliases: ["ds", "dataset"] + type: str + recurse: + description: + - Specifies if properties for any children should be recursively displayed. + type: bool + default: false + parsable: + description: + - Specifies if property values should be displayed in machine friendly format. + type: bool + default: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zfs(1M) + man page. + default: all + type: str + type: + description: + - Specifies which datasets types to display. Multiple values have to be provided in comma-separated form. + choices: ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] + default: all + type: str + depth: + description: + - Specifies recursion depth. + type: int + default: 0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather facts about ZFS dataset rpool/export/home community.general.zfs_facts: dataset: rpool/export/home @@ -73,88 +69,88 @@ EXAMPLES = ''' - ansible.builtin.debug: msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' with_items: '{{ ansible_zfs_datasets }}' -''' +""" -RETURN = ''' +RETURN = r""" name: - description: ZFS dataset name - returned: always - type: str - sample: rpool/var/spool + description: ZFS dataset name + returned: always + type: str + sample: rpool/var/spool parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: true + description: if parsable output should be provided in machine friendly format. 
+ returned: if O(parsable=True) + type: bool + sample: true recurse: - description: if we should recurse over ZFS dataset - returned: if 'recurse' is set to True - type: bool - sample: true + description: if we should recurse over ZFS dataset + returned: if O(recurse=True) + type: bool + sample: true zfs_datasets: - description: ZFS dataset facts - returned: always - type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": "none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } -''' + description: ZFS dataset facts + returned: always + type: str + sample: + { + "aclinherit": "restricted", + "aclmode": "discard", + "atime": "on", + "available": "43.8G", + "canmount": "on", + "casesensitivity": "sensitive", + 
"checksum": "on", + "compression": "off", + "compressratio": "1.00x", + "copies": "1", + "creation": "Thu Jun 16 11:37 2016", + "dedup": "off", + "devices": "on", + "exec": "on", + "filesystem_count": "none", + "filesystem_limit": "none", + "logbias": "latency", + "logicalreferenced": "18.5K", + "logicalused": "3.45G", + "mlslabel": "none", + "mounted": "yes", + "mountpoint": "/rpool", + "name": "rpool", + "nbmand": "off", + "normalization": "none", + "org.openindiana.caiman:install": "ready", + "primarycache": "all", + "quota": "none", + "readonly": "off", + "recordsize": "128K", + "redundant_metadata": "all", + "refcompressratio": "1.00x", + "referenced": "29.5K", + "refquota": "none", + "refreservation": "none", + "reservation": "none", + "secondarycache": "all", + "setuid": "on", + "sharenfs": "off", + "sharesmb": "off", + "snapdir": "hidden", + "snapshot_count": "none", + "snapshot_limit": "none", + "sync": "standard", + "type": "filesystem", + "used": "4.41G", + "usedbychildren": "4.41G", + "usedbydataset": "29.5K", + "usedbyrefreservation": "0", + "usedbysnapshots": "0", + "utf8only": "off", + "version": "5", + "vscan": "off", + "written": "29.5K", + "xattr": "on", + "zoned": "off" + } +""" from collections import defaultdict From 2adcc34dd551f54d2f81a34d6a7b299b290f9430 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 22 Dec 2024 21:53:25 +0100 Subject: [PATCH 383/482] CI: Arch Linux updated to Python 3.13 (#9310) * Arch Linux updated to Python 3.13. * Skip lmdb_kv lookup on Arch Linux. 
Ref: https://github.com/jnwatson/py-lmdb/issues/362 --- .azure-pipelines/azure-pipelines.yml | 2 +- tests/integration/targets/lookup_lmdb_kv/runme.sh | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 362b5d59dd..b9d428f2b0 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -345,7 +345,7 @@ stages: - name: Debian Bookworm test: debian-bookworm/3.11 - name: ArchLinux - test: archlinux/3.12 + test: archlinux/3.13 groups: - 1 - 2 diff --git a/tests/integration/targets/lookup_lmdb_kv/runme.sh b/tests/integration/targets/lookup_lmdb_kv/runme.sh index 71faa439d1..286ec6b16d 100755 --- a/tests/integration/targets/lookup_lmdb_kv/runme.sh +++ b/tests/integration/targets/lookup_lmdb_kv/runme.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-3.0-or-later set -eux +if grep -Fq 'NAME="Arch Linux"' /etc/os-release; then + exit 0 +fi + ANSIBLE_ROLES_PATH=../ \ ansible-playbook dependencies.yml -v "$@" From b57fef201e282937c8772ec9246e3fb61d06dbbc Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 21:28:05 +1300 Subject: [PATCH 384/482] [vw]*: adjust docs (#9309) [vm]*: adjust docs --- plugins/modules/vdo.py | 439 +++++++++-------------- plugins/modules/vertica_configuration.py | 41 +-- plugins/modules/vertica_info.py | 26 +- plugins/modules/vertica_role.py | 23 +- plugins/modules/vertica_schema.py | 37 +- plugins/modules/vertica_user.py | 38 +- plugins/modules/vexata_eg.py | 35 +- plugins/modules/vexata_volume.py | 24 +- plugins/modules/vmadm.py | 78 ++-- plugins/modules/wakeonlan.py | 27 +- plugins/modules/wdc_redfish_command.py | 35 +- plugins/modules/wdc_redfish_info.py | 57 ++- 12 files changed, 353 insertions(+), 507 deletions(-) diff --git a/plugins/modules/vdo.py b/plugins/modules/vdo.py index 8b0e745960..c7df2d234c 100644 --- a/plugins/modules/vdo.py +++ 
b/plugins/modules/vdo.py @@ -8,10 +8,9 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Bryan Gurney (@bgurney-rh) + - Bryan Gurney (@bgurney-rh) module: vdo @@ -19,277 +18,189 @@ short_description: Module to control VDO description: - - This module controls the VDO dedupe and compression device. - - VDO, or Virtual Data Optimizer, is a device-mapper target that - provides inline block-level deduplication, compression, and - thin provisioning capabilities to primary storage. - + - This module controls the VDO dedupe and compression device. + - VDO, or Virtual Data Optimizer, is a device-mapper target that provides inline block-level deduplication, compression, and thin provisioning + capabilities to primary storage. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the VDO volume. - type: str - required: true - state: - description: - - Whether this VDO volume should be "present" or "absent". - If a "present" VDO volume does not exist, it will be - created. If a "present" VDO volume already exists, it - will be modified, by updating the configuration, which - will take effect when the VDO volume is restarted. - Not all parameters of an existing VDO volume can be - modified; the "statusparamkeys" list contains the - parameters that can be modified after creation. If an - "absent" VDO volume does not exist, it will not be - removed. - type: str - choices: [ absent, present ] - default: present - activated: - description: - - The "activate" status for a VDO volume. If this is set - to V(false), the VDO volume cannot be started, and it will - not start on system startup. 
However, on initial - creation, a VDO volume with "activated" set to "off" - will be running, until stopped. This is the default - behavior of the "vdo create" command; it provides the - user an opportunity to write a base amount of metadata - (filesystem, LVM headers, etc.) to the VDO volume prior - to stopping the volume, and leaving it deactivated - until ready to use. - type: bool - running: - description: - - Whether this VDO volume is running. - - A VDO volume must be activated in order to be started. - type: bool - device: - description: - - The full path of the device to use for VDO storage. - - This is required if "state" is "present". - type: str - logicalsize: - description: - - The logical size of the VDO volume (in megabytes, or - LVM suffix format). If not specified for a new volume, - this defaults to the same size as the underlying storage - device, which is specified in the 'device' parameter. - Existing volumes will maintain their size if the - logicalsize parameter is not specified, or is smaller - than or identical to the current size. If the specified - size is larger than the current size, a growlogical - operation will be performed. - type: str - deduplication: - description: - - Configures whether deduplication is enabled. The - default for a created volume is 'enabled'. Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - type: str - choices: [ disabled, enabled ] - compression: - description: - - Configures whether compression is enabled. The default - for a created volume is 'enabled'. Existing volumes - will maintain their previously configured setting unless - a different value is specified in the playbook. - type: str - choices: [ disabled, enabled ] - blockmapcachesize: - description: - - The amount of memory allocated for caching block map - pages, in megabytes (or may be issued with an LVM-style - suffix of K, M, G, or T). 
The default (and minimum) - value is 128M. The value specifies the size of the - cache; there is a 15% memory usage overhead. Each 1.25G - of block map covers 1T of logical blocks, therefore a - small amount of block map cache memory can cache a - significantly large amount of block map data. Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - type: str - readcache: - description: - - Enables or disables the read cache. The default is - 'disabled'. Choosing 'enabled' enables a read cache - which may improve performance for workloads of high - deduplication, read workloads with a high level of - compression, or on hard disk storage. Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - - The read cache feature is available in VDO 6.1 and older. - type: str - choices: [ disabled, enabled ] - readcachesize: - description: - - Specifies the extra VDO device read cache size in - megabytes. This is in addition to a system-defined - minimum. Using a value with a suffix of K, M, G, or T - is optional. The default value is 0. 1.125 MB of - memory per bio thread will be used per 1 MB of read - cache specified (for example, a VDO volume configured - with 4 bio threads will have a read cache memory usage - overhead of 4.5 MB per 1 MB of read cache specified). - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - - The read cache feature is available in VDO 6.1 and older. - type: str - emulate512: - description: - - Enables 512-byte emulation mode, allowing drivers or - filesystems to access the VDO volume at 512-byte - granularity, instead of the default 4096-byte granularity. - Default is 'disabled'; only recommended when a driver - or filesystem requires 512-byte sector level access to - a device. 
This option is only available when creating - a new volume, and cannot be changed for an existing - volume. - type: bool - default: false - growphysical: - description: - - Specifies whether to attempt to execute a growphysical - operation, if there is enough unused space on the - device. A growphysical operation will be executed if - there is at least 64 GB of free space, relative to the - previous physical size of the affected VDO volume. - type: bool - default: false - slabsize: - description: - - The size of the increment by which the physical size of - a VDO volume is grown, in megabytes (or may be issued - with an LVM-style suffix of K, M, G, or T). Must be a - power of two between 128M and 32G. The default is 2G, - which supports volumes having a physical size up to 16T. - The maximum, 32G, supports a physical size of up to 256T. - This option is only available when creating a new - volume, and cannot be changed for an existing volume. - type: str - writepolicy: - description: - - Specifies the write policy of the VDO volume. The - 'sync' mode acknowledges writes only after data is on - stable storage. The 'async' mode acknowledges writes - when data has been cached for writing to stable - storage. The default (and highly recommended) 'auto' - mode checks the storage device to determine whether it - supports flushes. Devices that support flushes will - result in a VDO volume in 'async' mode, while devices - that do not support flushes will run in sync mode. - Existing volumes will maintain their previously - configured setting unless a different value is - specified in the playbook. - type: str - choices: [ async, auto, sync ] - indexmem: - description: - - Specifies the amount of index memory in gigabytes. The - default is 0.25. The special decimal values 0.25, 0.5, - and 0.75 can be used, as can any positive integer. - This option is only available when creating a new - volume, and cannot be changed for an existing volume. 
- type: str - indexmode: - description: - - Specifies the index mode of the Albireo index. The - default is 'dense', which has a deduplication window of - 1 GB of index memory per 1 TB of incoming data, - requiring 10 GB of index data on persistent storage. - The 'sparse' mode has a deduplication window of 1 GB of - index memory per 10 TB of incoming data, but requires - 100 GB of index data on persistent storage. This option - is only available when creating a new volume, and cannot - be changed for an existing volume. - type: str - choices: [ dense, sparse ] - ackthreads: - description: - - Specifies the number of threads to use for - acknowledging completion of requested VDO I/O operations. - Valid values are integer values from 1 to 100 (lower - numbers are preferable due to overhead). The default is - 1. Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - biothreads: - description: - - Specifies the number of threads to use for submitting I/O - operations to the storage device. Valid values are - integer values from 1 to 100 (lower numbers are - preferable due to overhead). The default is 4. - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - cputhreads: - description: - - Specifies the number of threads to use for CPU-intensive - work such as hashing or compression. Valid values are - integer values from 1 to 100 (lower numbers are - preferable due to overhead). The default is 2. - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - logicalthreads: - description: - - Specifies the number of threads across which to - subdivide parts of the VDO processing based on logical - block addresses. Valid values are integer values from - 1 to 100 (lower numbers are preferable due to overhead). 
- The default is 1. Existing volumes will maintain their - previously configured setting unless a different value - is specified in the playbook. - type: str - physicalthreads: - description: - - Specifies the number of threads across which to - subdivide parts of the VDO processing based on physical - block addresses. Valid values are integer values from - 1 to 16 (lower numbers are preferable due to overhead). - The physical space used by the VDO volume must be - larger than (slabsize * physicalthreads). The default - is 1. Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - force: - description: - - When creating a volume, ignores any existing file system - or VDO signature already present in the storage device. - When stopping or removing a VDO volume, first unmounts - the file system stored on the device if mounted. - - "B(Warning:) Since this parameter removes all safety - checks it is important to make sure that all parameters - provided are accurate and intentional." - type: bool - default: false - version_added: 2.4.0 + name: + description: + - The name of the VDO volume. + type: str + required: true + state: + description: + - Whether this VDO volume should be V(present) or V(absent). If a V(present) VDO volume does not exist, it will be created. If a V(present) + VDO volume already exists, it will be modified, by updating the configuration, which will take effect when the VDO volume is restarted. + Not all parameters of an existing VDO volume can be modified; the C(statusparamkeys) list in the code contains the parameters that can + be modified after creation. If an V(absent) VDO volume does not exist, it will not be removed. + type: str + choices: [absent, present] + default: present + activated: + description: + - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it will not start on system + startup. 
However, on initial creation, a VDO volume with "activated" set to "off" will be running, until stopped. This is the default + behavior of the C(vdo create) command; it provides the user an opportunity to write a base amount of metadata (filesystem, LVM headers, + etc.) to the VDO volume prior to stopping the volume, and leaving it deactivated until ready to use. + type: bool + running: + description: + - Whether this VDO volume is running. + - A VDO volume must be activated in order to be started. + type: bool + device: + description: + - The full path of the device to use for VDO storage. + - This is required if O(state=present). + type: str + logicalsize: + description: + - The logical size of the VDO volume (in megabytes, or LVM suffix format). If not specified for a new volume, this defaults to the same + size as the underlying storage device, which is specified in the O(device) parameter. Existing volumes will maintain their size if the + logicalsize parameter is not specified, or is smaller than or identical to the current size. If the specified size is larger than the + current size, a C(growlogical) operation will be performed. + type: str + deduplication: + description: + - Configures whether deduplication is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously + configured setting unless a different value is specified in the playbook. + type: str + choices: [disabled, enabled] + compression: + description: + - Configures whether compression is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously + configured setting unless a different value is specified in the playbook. + type: str + choices: [disabled, enabled] + blockmapcachesize: + description: + - The amount of memory allocated for caching block map pages, in megabytes (or may be issued with an LVM-style suffix of K, M, G, or T). + The default (and minimum) value is V(128M). 
The value specifies the size of the cache; there is a 15% memory usage overhead. Each 1.25G + of block map covers 1T of logical blocks, therefore a small amount of block map cache memory can cache a significantly large amount of + block map data. + - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + type: str + readcache: + description: + - Enables or disables the read cache. The default is V(disabled). Choosing V(enabled) enables a read cache which may improve performance + for workloads of high deduplication, read workloads with a high level of compression, or on hard disk storage. Existing volumes will maintain + their previously configured setting unless a different value is specified in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + choices: [disabled, enabled] + readcachesize: + description: + - Specifies the extra VDO device read cache size in megabytes. This is in addition to a system-defined minimum. Using a value with a suffix + of K, M, G, or T is optional. The default value is V(0). 1.125 MB of memory per bio thread will be used per 1 MB of read cache specified + (for example, a VDO volume configured with 4 bio threads will have a read cache memory usage overhead of 4.5 MB per 1 MB of read cache + specified). Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + emulate512: + description: + - Enables 512-byte emulation mode, allowing drivers or filesystems to access the VDO volume at 512-byte granularity, instead of the default + 4096-byte granularity. + - Only recommended when a driver or filesystem requires 512-byte sector level access to a device. + - This option is only available when creating a new volume, and cannot be changed for an existing volume. 
+ type: bool + default: false + growphysical: + description: + - Specifies whether to attempt to execute a C(growphysical) operation, if there is enough unused space on the device. A C(growphysical) + operation will be executed if there is at least 64 GB of free space, relative to the previous physical size of the affected VDO volume. + type: bool + default: false + slabsize: + description: + - The size of the increment by which the physical size of a VDO volume is grown, in megabytes (or may be issued with an LVM-style suffix + of K, M, G, or T). Must be a power of two between 128M and 32G. The default is V(2G), which supports volumes having a physical size up + to 16T. The maximum, V(32G), supports a physical size of up to 256T. This option is only available when creating a new volume, and cannot + be changed for an existing volume. + type: str + writepolicy: + description: + - Specifies the write policy of the VDO volume. + - The V(sync) mode acknowledges writes only after data is on stable storage. + - The V(async) mode acknowledges writes when data has been cached for writing to stable storage. + - The default (and highly recommended) V(auto) mode checks the storage device to determine whether it supports flushes. Devices that support + flushes will result in a VDO volume in V(async) mode, while devices that do not support flushes will run in V(sync) mode. + - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + type: str + choices: [async, auto, sync] + indexmem: + description: + - Specifies the amount of index memory in gigabytes. The default is V(0.25). The special decimal values V(0.25), V(0.5), and V(0.75) can + be used, as can any positive integer. This option is only available when creating a new volume, and cannot be changed for an existing + volume. + type: str + indexmode: + description: + - Specifies the index mode of the Albireo index. 
+ - The default is V(dense), which has a deduplication window of 1 GB of index memory per 1 TB of incoming data, requiring 10 GB of index + data on persistent storage. + - The V(sparse) mode has a deduplication window of 1 GB of index memory per 10 TB of incoming data, but requires 100 GB of index data on + persistent storage. + - This option is only available when creating a new volume, and cannot be changed for an existing volume. + type: str + choices: [dense, sparse] + ackthreads: + description: + - Specifies the number of threads to use for acknowledging completion of requested VDO I/O operations. Valid values are integer values from + V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + biothreads: + description: + - Specifies the number of threads to use for submitting I/O operations to the storage device. Valid values are integer values from V(1) + to V(100) (lower numbers are preferable due to overhead). The default is V(4). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + cputhreads: + description: + - Specifies the number of threads to use for CPU-intensive work such as hashing or compression. Valid values are integer values from V(1) + to V(100) (lower numbers are preferable due to overhead). The default is V(2). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + logicalthreads: + description: + - Specifies the number of threads across which to subdivide parts of the VDO processing based on logical block addresses. Valid values are + integer values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). 
Existing volumes will maintain + their previously configured setting unless a different value is specified in the playbook. + type: str + physicalthreads: + description: + - Specifies the number of threads across which to subdivide parts of the VDO processing based on physical block addresses. Valid values + are integer values from V(1) to V(16) (lower numbers are preferable due to overhead). The physical space used by the VDO volume must be + larger than (O(slabsize) * O(physicalthreads)). The default is V(1). Existing volumes will maintain their previously configured setting + unless a different value is specified in the playbook. + type: str + force: + description: + - When creating a volume, ignores any existing file system or VDO signature already present in the storage device. When stopping or removing + a VDO volume, first unmounts the file system stored on the device if mounted. + - B(Warning:) Since this parameter removes all safety checks it is important to make sure that all parameters provided are accurate and + intentional. + type: bool + default: false + version_added: 2.4.0 notes: - In general, the default thread configuration should be used. 
requirements: - PyYAML - kmod-kvdo - vdo -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create 2 TB VDO volume vdo1 on device /dev/md0 community.general.vdo: name: vdo1 @@ -301,9 +212,9 @@ EXAMPLES = r''' community.general.vdo: name: vdo1 state: absent -''' +""" -RETURN = r'''# ''' +RETURN = r"""# """ from ansible.module_utils.basic import AnsibleModule, missing_required_lib import re diff --git a/plugins/modules/vertica_configuration.py b/plugins/modules/vertica_configuration.py index 09b80df3d7..9ce2e42d15 100644 --- a/plugins/modules/vertica_configuration.py +++ b/plugins/modules/vertica_configuration.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_configuration short_description: Updates Vertica configuration parameters description: - - Updates Vertica configuration parameters. + - Updates Vertica configuration parameters. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -24,51 +23,49 @@ attributes: options: parameter: description: - - Name of the parameter to update. + - Name of the parameter to update. required: true aliases: [name] type: str value: description: - - Value of the parameter to be set. + - Value of the parameter to be set. type: str db: description: - - Name of the Vertica database. + - Name of the Vertica database. type: str cluster: description: - - Name of the Vertica cluster. + - Name of the Vertica cluster. default: localhost type: str port: description: - - Vertica cluster port to connect to. + - Vertica cluster port to connect to. default: '5433' type: str login_user: description: - - The username used to authenticate with. + - The username used to authenticate with. default: dbadmin type: str login_password: description: - - The password used to authenticate with. + - The password used to authenticate with. 
type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Updating load_balance_policy community.general.vertica_configuration: name=failovertostandbyafter value='8 hours' """ diff --git a/plugins/modules/vertica_info.py b/plugins/modules/vertica_info.py index 93ccc68445..bfb99552a0 100644 --- a/plugins/modules/vertica_info.py +++ b/plugins/modules/vertica_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_info short_description: Gathers Vertica database facts description: @@ -25,8 +24,7 @@ options: default: localhost type: str port: - description: - Database port to connect to. + description: Database port to connect to. default: '5433' type: str db: @@ -43,19 +41,17 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed on the host and properly + configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Gathering vertica facts community.general.vertica_info: db=db_name register: result diff --git a/plugins/modules/vertica_role.py b/plugins/modules/vertica_role.py index a1ef40c7a5..c3e15b4b95 100644 --- a/plugins/modules/vertica_role.py +++ b/plugins/modules/vertica_role.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_role short_description: Adds or removes Vertica database roles and assigns roles to them description: @@ -64,19 +63,17 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. 
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica role community.general.vertica_role: name=role_name db=db_name state=present diff --git a/plugins/modules/vertica_schema.py b/plugins/modules/vertica_schema.py index 95e434ef3a..b9e243ec7b 100644 --- a/plugins/modules/vertica_schema.py +++ b/plugins/modules/vertica_schema.py @@ -9,17 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_schema short_description: Adds or removes Vertica database schema and roles description: - - Adds or removes Vertica database schema and, optionally, roles - with schema access privileges. + - Adds or removes Vertica database schema and, optionally, roles with schema access privileges. - A schema will not be removed until all the objects have been dropped. - - In such a situation, if the module tries to remove the schema it - will fail and only remove roles created for the schema if they have - no dependencies. + - In such a situation, if the module tries to remove the schema it will fail and only remove roles created for the schema if they have no dependencies. extends_documentation_fragment: - community.general.attributes attributes: @@ -78,19 +74,17 @@ options: - The password used to authenticate with. 
type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica schema community.general.vertica_schema: name=schema_name db=db_name state=present @@ -98,12 +92,7 @@ EXAMPLES = """ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present - name: Creating a new schema with roles - community.general.vertica_schema: - name=schema_name - create_roles=schema_name_all - usage_roles=schema_name_ro,schema_name_rw - db=db_name - state=present + community.general.vertica_schema: name=schema_name create_roles=schema_name_all usage_roles=schema_name_ro,schema_name_rw db=db_name state=present """ import traceback diff --git a/plugins/modules/vertica_user.py b/plugins/modules/vertica_user.py index 7a62bec44c..c73e0d54fd 100644 --- a/plugins/modules/vertica_user.py +++ b/plugins/modules/vertica_user.py @@ -8,15 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vertica_user short_description: Adds or removes Vertica database users and assigns roles description: - Adds or removes Vertica database user and, optionally, assigns roles. - A user will not be removed until all the dependencies have been dropped. - - In such a situation, if the module tries to remove the user it - will fail and only remove roles granted to the user. + - In such a situation, if the module tries to remove the user it will fail and only remove roles granted to the user. extends_documentation_fragment: - community.general.attributes attributes: @@ -42,9 +40,8 @@ options: password: description: - The user's password encrypted by the MD5 algorithm. - - The password must be generated with the format C("md5" + md5[password + username]), - resulting in a total of 35 characters. An easy way to do this is by querying - the Vertica database with select V('md5'||md5(''\)). 
+ - The password must be generated with the format C("md5" + md5[password + username]), resulting in a total of 35 characters. An easy way + to do this is by querying the Vertica database with select V('md5'||md5(''\)). type: str expired: description: @@ -90,29 +87,22 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica user with password community.general.vertica_user: name=user_name password=md5 db=db_name state=present - name: Creating a new vertica user authenticated via ldap with roles assigned - community.general.vertica_user: - name=user_name - ldap=true - db=db_name - roles=schema_name_ro - state=present + community.general.vertica_user: name=user_name ldap=true db=db_name roles=schema_name_ro state=present """ import traceback diff --git a/plugins/modules/vexata_eg.py b/plugins/modules/vexata_eg.py index 457d1fa9ed..f7184d68b0 100644 --- a/plugins/modules/vexata_eg.py +++ b/plugins/modules/vexata_eg.py @@ -9,15 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vexata_eg short_description: Manage export groups on Vexata VX100 storage arrays description: - - Create or delete export groups on a Vexata VX100 array. - - An export group is a tuple of a volume group, initiator group and port - group that allows a set of volumes to be exposed to one or more hosts - through specific array ports. + - Create or delete export groups on a Vexata VX100 array. + - An export group is a tuple of a volume group, initiator group and port group that allows a set of volumes to be exposed to one or more hosts + through specific array ports. author: - Sandeep Kasargod (@vexata) attributes: @@ -33,29 +31,28 @@ options: type: str state: description: - - Creates export group when present or delete when absent. + - Creates export group when present or delete when absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str vg: description: - - Volume group name. + - Volume group name. type: str ig: description: - - Initiator group name. + - Initiator group name. type: str pg: description: - - Port group name. 
+ - Port group name. type: str extends_documentation_fragment: -- community.general.vexata.vx100 -- community.general.attributes + - community.general.vexata.vx100 + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create export group named db_export. community.general.vexata_eg: name: db_export @@ -74,10 +71,10 @@ EXAMPLES = r''' array: vx100_ultra.test.com user: admin password: secret -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( diff --git a/plugins/modules/vexata_volume.py b/plugins/modules/vexata_volume.py index 7fdfc7e5fa..29136eb31e 100644 --- a/plugins/modules/vexata_volume.py +++ b/plugins/modules/vexata_volume.py @@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vexata_volume short_description: Manage volumes on Vexata VX100 storage arrays description: - - Create, deletes or extend volumes on a Vexata VX100 array. + - Create, deletes or extend volumes on a Vexata VX100 array. author: -- Sandeep Kasargod (@vexata) + - Sandeep Kasargod (@vexata) attributes: check_mode: support: full @@ -32,19 +31,18 @@ options: description: - Creates/Modifies volume when present or removes when absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str size: description: - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes. 
type: str extends_documentation_fragment: -- community.general.vexata.vx100 -- community.general.attributes + - community.general.vexata.vx100 + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create new 2 TiB volume named foo community.general.vexata_volume: name: foo @@ -70,10 +68,10 @@ EXAMPLES = r''' array: vx100_ultra.test.com user: admin password: secret -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py index 923a902bcf..148ca18b86 100644 --- a/plugins/modules/vmadm.py +++ b/plugins/modules/vmadm.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vmadm short_description: Manage SmartOS virtual machines and zones description: @@ -27,8 +26,7 @@ options: archive_on_delete: required: false description: - - When enabled, the zone dataset will be mounted on C(/zones/archive) - upon removal. + - When enabled, the zone dataset will be mounted on C(/zones/archive) upon removal. type: bool autoboot: required: false @@ -36,7 +34,7 @@ options: - Whether or not a VM is booted when the system is rebooted. type: bool brand: - choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] + choices: [joyent, joyent-minimal, lx, kvm, bhyve] default: joyent description: - Type of virtual machine. The V(bhyve) option was added in community.general 0.2.0. @@ -49,18 +47,16 @@ options: cpu_cap: required: false description: - - Sets a limit on the amount of CPU time that can be used by a VM. - Use V(0) for no cap. + - Sets a limit on the amount of CPU time that can be used by a VM. Use V(0) for no cap. type: int cpu_shares: required: false description: - - Sets a limit on the number of fair share scheduler (FSS) CPU shares for - a VM. 
This limit is relative to all other VMs on the system. + - Sets a limit on the number of fair share scheduler (FSS) CPU shares for a VM. This limit is relative to all other VMs on the system. type: int cpu_type: required: false - choices: [ qemu64, host ] + choices: [qemu64, host] default: qemu64 description: - Control the type of virtual CPU exposed to KVM VMs. @@ -68,8 +64,7 @@ options: customer_metadata: required: false description: - - Metadata to be set and associated with this VM, this contain customer - modifiable keys. + - Metadata to be set and associated with this VM, this contain customer modifiable keys. type: dict delegate_dataset: required: false @@ -141,14 +136,12 @@ options: internal_metadata: required: false description: - - Metadata to be set and associated with this VM, this contains operator - generated keys. + - Metadata to be set and associated with this VM, this contains operator generated keys. type: dict internal_metadata_namespace: required: false description: - - List of namespaces to be set as C(internal_metadata-only); these namespaces - will come from O(internal_metadata) rather than O(customer_metadata). + - List of namespaces to be set as C(internal_metadata-only); these namespaces will come from O(internal_metadata) rather than O(customer_metadata). type: str kernel_version: required: false @@ -163,8 +156,7 @@ options: maintain_resolvers: required: false description: - - Resolvers in C(/etc/resolv.conf) will be updated when updating - the O(resolvers) property. + - Resolvers in C(/etc/resolv.conf) will be updated when updating the O(resolvers) property. type: bool max_locked_memory: required: false @@ -189,12 +181,11 @@ options: mdata_exec_timeout: required: false description: - - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service - that runs user-scripts in the zone. + - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service that runs user-scripts in the zone. 
type: int name: required: false - aliases: [ alias ] + aliases: [alias] description: - Name of the VM. vmadm(1M) uses this as an optional name. type: str @@ -212,14 +203,12 @@ options: nowait: required: false description: - - Consider the provisioning complete when the VM first starts, rather than - when the VM has rebooted. + - Consider the provisioning complete when the VM first starts, rather than when the VM has rebooted. type: bool qemu_opts: required: false description: - - Additional qemu arguments for KVM guests. This overwrites the default arguments - provided by vmadm(1M) and should only be used for debugging. + - Additional qemu arguments for KVM guests. This overwrites the default arguments provided by vmadm(1M) and should only be used for debugging. type: str qemu_extra_opts: required: false @@ -245,8 +234,7 @@ options: routes: required: false description: - - Dictionary that maps destinations to gateways, these will be set as static - routes in the VM. + - Dictionary that maps destinations to gateways, these will be set as static routes in the VM. type: dict spice_opts: required: false @@ -256,19 +244,15 @@ options: spice_password: required: false description: - - Password required to connect to SPICE. By default no password is set. - Please note this can be read from the Global Zone. + - Password required to connect to SPICE. By default no password is set. Please note this can be read from the Global Zone. type: str state: - choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ] + choices: [present, running, absent, deleted, stopped, created, restarted, rebooted] default: running description: - - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted) - operate on a VM that is currently provisioned. V(present) means that the VM will be - created if it was absent, and that it will be in a running state. V(absent) will - shutdown the zone before removing it. 
- V(stopped) means the zone will be created if it does not exist already, before shutting - it down. + - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted) operate on a VM that is currently provisioned. V(present) + means that the VM will be created if it was absent, and that it will be in a running state. V(absent) will shutdown the zone before removing + it. V(stopped) means the zone will be created if it does not exist already, before shutting it down. type: str tmpfs: required: false @@ -303,20 +287,17 @@ options: vnc_password: required: false description: - - Password required to connect to VNC. By default no password is set. - Please note this can be read from the Global Zone. + - Password required to connect to VNC. By default no password is set. Please note this can be read from the Global Zone. type: str vnc_port: required: false description: - - TCP port to listen of the VNC server. Or set V(0) for random, - or V(-1) to disable. + - TCP port to listen of the VNC server. Or set V(0) for random, or V(-1) to disable. type: int zfs_data_compression: required: false description: - - Specifies compression algorithm used for this VMs data dataset. This option - only has effect on delegated datasets. + - Specifies compression algorithm used for this VMs data dataset. This option only has effect on delegated datasets. type: str zfs_data_recsize: required: false @@ -336,8 +317,7 @@ options: zfs_root_compression: required: false description: - - Specifies compression algorithm used for this VMs root dataset. This option - only has effect on the zoneroot dataset. + - Specifies compression algorithm used for this VMs root dataset. This option only has effect on the zoneroot dataset. type: str zfs_root_recsize: required: false @@ -354,9 +334,9 @@ options: description: - ZFS pool the VM's zone dataset will be created in. 
type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create SmartOS zone community.general.vmadm: brand: joyent @@ -382,9 +362,9 @@ EXAMPLES = ''' community.general.vmadm: uuid: '*' state: stopped -''' +""" -RETURN = ''' +RETURN = r""" uuid: description: UUID of the managed VM. returned: always @@ -400,7 +380,7 @@ state: returned: success type: str sample: 'running' -''' +""" import json import os diff --git a/plugins/modules/wakeonlan.py b/plugins/modules/wakeonlan.py index 6d7e094527..235be741a7 100644 --- a/plugins/modules/wakeonlan.py +++ b/plugins/modules/wakeonlan.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: wakeonlan short_description: Send a magic Wake-on-LAN (WoL) broadcast packet description: @@ -25,17 +24,17 @@ attributes: options: mac: description: - - MAC address to send Wake-on-LAN broadcast packet for. + - MAC address to send Wake-on-LAN broadcast packet for. required: true type: str broadcast: description: - - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. + - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. default: 255.255.255.255 type: str port: description: - - UDP port to use for magic Wake-on-LAN packet. + - UDP port to use for magic Wake-on-LAN packet. default: 7 type: int todo: @@ -43,16 +42,16 @@ todo: - Enable check-mode support (when we have arping support) - Does not have SecureOn password support notes: - - This module sends a magic packet, without knowing whether it worked - - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) + - This module sends a magic packet, without knowing whether it worked. + - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS). - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first). 
seealso: -- module: community.windows.win_wakeonlan + - module: community.windows.win_wakeonlan author: -- Dag Wieers (@dagwieers) -''' + - Dag Wieers (@dagwieers) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 community.general.wakeonlan: mac: '00:00:5E:00:53:66' @@ -63,11 +62,11 @@ EXAMPLES = r''' mac: 00:00:5E:00:53:66 port: 9 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import socket import struct import traceback diff --git a/plugins/modules/wdc_redfish_command.py b/plugins/modules/wdc_redfish_command.py index 93c4811afe..680bd4b3f9 100644 --- a/plugins/modules/wdc_redfish_command.py +++ b/plugins/modules/wdc_redfish_command.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: wdc_redfish_command short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs version_added: 5.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - Manages OOB controller firmware. For example, Firmware Activate, Update and Activate. extends_documentation_fragment: - community.general.attributes @@ -38,11 +36,11 @@ options: elements: str baseuri: description: - - Base URI of OOB controller. Must include this or O(ioms). + - Base URI of OOB controller. Must include this or O(ioms). type: str ioms: description: - - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). + - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). type: list elements: str username: @@ -90,14 +88,12 @@ options: - The password for retrieving the update image. type: str notes: - - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. 
- - ioms is a list of FQDNs for the enclosure's IOMs. - - + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - Ioms is a list of FQDNs for the enclosure's IOMs. author: Mike Moerk (@mikemoerk) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Firmware Activate (required after SimpleUpdate to apply the new firmware) community.general.wdc_redfish_command: category: Update @@ -188,16 +184,15 @@ EXAMPLES = ''' category: Chassis resource_id: Enclosure command: PowerModeNormal +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/wdc_redfish_info.py b/plugins/modules/wdc_redfish_info.py index 03ae67fcfe..caaa9c7fd9 100644 --- a/plugins/modules/wdc_redfish_info.py +++ b/plugins/modules/wdc_redfish_info.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: wdc_redfish_info short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs version_added: 5.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote OOB controllers to get information back. extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module @@ -33,11 +31,11 @@ options: elements: str baseuri: description: - - Base URI of OOB controller. Must include this or O(ioms). + - Base URI of OOB controller. Must include this or O(ioms). 
type: str ioms: description: - - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). + - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). type: list elements: str username: @@ -59,13 +57,12 @@ options: type: int notes: - - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. - - ioms is a list of FQDNs for the enclosure's IOMs. - + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - Ioms is a list of FQDNs for the enclosure's IOMs. author: Mike Moerk (@mikemoerk) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get Simple Update Status with individual IOMs specified community.general.wdc_redfish_info: category: Update @@ -93,30 +90,30 @@ EXAMPLES = ''' - name: Print fetched information ansible.builtin.debug: msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}" -''' +""" -RETURN = ''' +RETURN = r""" Description: - description: Firmware update status description. - returned: always - type: str - sample: Ready for FW update + description: Firmware update status description. + returned: always + type: str + sample: Ready for FW update ErrorCode: - description: Numeric error code for firmware update status. Non-zero indicates an error condition. - returned: always - type: int - sample: 0 + description: Numeric error code for firmware update status. Non-zero indicates an error condition. + returned: always + type: int + sample: 0 EstimatedRemainingMinutes: - description: Estimated number of minutes remaining in firmware update operation. - returned: always - type: int - sample: 20 + description: Estimated number of minutes remaining in firmware update operation. + returned: always + type: int + sample: 20 StatusCode: - description: Firmware update status code. - returned: always - type: int - sample: 2 -''' + description: Firmware update status code. 
+ returned: always + type: int + sample: 2 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native From e809a2548653f3904b346b51ac549872e53bd771 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 21:28:19 +1300 Subject: [PATCH 385/482] zypper: adjust docs (#9307) * adjust docs * Update plugins/modules/zypper.py Co-authored-by: Felix Fontein * Update plugins/modules/zypper.py Co-authored-by: Felix Fontein * Update plugins/modules/zypper_repository.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/zypper.py | 252 +++++++++++----------- plugins/modules/zypper_repository.py | 145 ++++++------- plugins/modules/zypper_repository_info.py | 97 ++++----- 3 files changed, 242 insertions(+), 252 deletions(-) diff --git a/plugins/modules/zypper.py b/plugins/modules/zypper.py index fae859fe74..ac5b6657ba 100644 --- a/plugins/modules/zypper.py +++ b/plugins/modules/zypper.py @@ -18,143 +18,141 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zypper author: - - "Patrick Callahan (@dirtyharrycallahan)" - - "Alexander Gubin (@alxgu)" - - "Thomas O'Donnell (@andytom)" - - "Robin Roth (@robinro)" - - "Andrii Radyk (@AnderEnder)" + - "Patrick Callahan (@dirtyharrycallahan)" + - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" short_description: Manage packages on SUSE and openSUSE description: - - Manage packages on SUSE and openSUSE using the zypper and rpm tools. - - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run). + - Manage packages on SUSE and openSUSE using the zypper and rpm tools. 
+ - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run). extends_documentation_fragment: - - community.general.attributes - - community.general.attributes + - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - name: - description: - - Package name V(name) or package specifier or a list of either. - - Can include a version like V(name=1.0), V(name>3.4) or V(name<=2.7). If a version is given, V(oldpackage) is implied and zypper is allowed to - update the package within the version range given. - - You can also pass a url or a local path to a rpm file. - - When using O(state=latest), this can be '*', which updates all installed packages. - required: true - aliases: [ 'pkg' ] - type: list - elements: str - state: - description: - - V(present) will make sure the package is installed. - V(latest) will make sure the latest version of the package is installed. - V(absent) will make sure the specified package is not installed. - V(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed. - - When using V(dist-upgrade), O(name) should be V('*'). - required: false - choices: [ present, latest, absent, dist-upgrade, installed, removed ] - default: "present" - type: str - type: - description: - - The type of package to be operated on. - required: false - choices: [ package, patch, pattern, product, srcpackage, application ] - default: "package" - type: str - extra_args_precommand: - required: false - description: - - Add additional global target options to C(zypper). - - Options should be supplied in a single line as if given in the command line. - type: str - disable_gpg_check: - description: - - Whether to disable to GPG signature checking of the package - signature being installed. 
Has an effect only if O(state) is - V(present) or V(latest). - required: false - default: false - type: bool - disable_recommends: - description: - - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (V(true)) modifies zypper's default behavior; V(false) does - install recommended packages. - required: false - default: true - type: bool - force: - description: - - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. - required: false - default: false - type: bool - force_resolution: - description: - - Adds C(--force-resolution) option to I(zypper). Allows to (un)install packages with conflicting requirements (resolver will choose a solution). - required: false - default: false - type: bool - version_added: '0.2.0' - update_cache: - description: - - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode. - required: false - default: false - type: bool - aliases: [ "refresh" ] - oldpackage: - description: - - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a - version is specified as part of the package name. - required: false - default: false - type: bool - extra_args: - required: false - description: - - Add additional options to C(zypper) command. - - Options should be supplied in a single line as if given in the command line. - type: str - allow_vendor_change: - type: bool - required: false - default: false - description: - - Adds C(--allow_vendor_change) option to I(zypper) dist-upgrade command. - version_added: '0.2.0' - replacefiles: - type: bool - required: false - default: false - description: - - Adds C(--replacefiles) option to I(zypper) install/update command. - version_added: '0.2.0' - clean_deps: - type: bool - required: false - default: false - description: - - Adds C(--clean-deps) option to I(zypper) remove command. 
- version_added: '4.6.0' + name: + description: + - Package name V(name) or package specifier or a list of either. + - Can include a version like V(name=1.0), V(name>3.4) or V(name<=2.7). If a version is given, V(oldpackage) is implied and zypper is allowed + to update the package within the version range given. + - You can also pass a url or a local path to a rpm file. + - When using O(state=latest), this can be V(*), which updates all installed packages. + required: true + aliases: ['pkg'] + type: list + elements: str + state: + description: + - V(present) will make sure the package is installed. + - V(latest) will make sure the latest version of the package is installed. + - V(absent) will make sure the specified package is not installed. + - V(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed. + - When using V(dist-upgrade), O(name) should be V(*). + required: false + choices: [present, latest, absent, dist-upgrade, installed, removed] + default: "present" + type: str + type: + description: + - The type of package to be operated on. + required: false + choices: [package, patch, pattern, product, srcpackage, application] + default: "package" + type: str + extra_args_precommand: + required: false + description: + - Add additional global target options to C(zypper). + - Options should be supplied in a single line as if given in the command line. + type: str + disable_gpg_check: + description: + - Whether to disable to GPG signature checking of the package signature being installed. Has an effect only if O(state) is V(present) or + V(latest). + required: false + default: false + type: bool + disable_recommends: + description: + - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (V(true)) modifies zypper's default behavior; V(false) does + install recommended packages. 
+ required: false + default: true + type: bool + force: + description: + - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. + required: false + default: false + type: bool + force_resolution: + description: + - Adds C(--force-resolution) option to I(zypper). Allows to (un)install packages with conflicting requirements (resolver will choose a solution). + required: false + default: false + type: bool + version_added: '0.2.0' + update_cache: + description: + - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode. + required: false + default: false + type: bool + aliases: ["refresh"] + oldpackage: + description: + - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a + version is specified as part of the package name. + required: false + default: false + type: bool + extra_args: + required: false + description: + - Add additional options to C(zypper) command. + - Options should be supplied in a single line as if given in the command line. + type: str + allow_vendor_change: + type: bool + required: false + default: false + description: + - Adds C(--allow_vendor_change) option to I(zypper) dist-upgrade command. + version_added: '0.2.0' + replacefiles: + type: bool + required: false + default: false + description: + - Adds C(--replacefiles) option to I(zypper) install/update command. + version_added: '0.2.0' + clean_deps: + type: bool + required: false + default: false + description: + - Adds C(--clean-deps) option to I(zypper) remove command. + version_added: '4.6.0' notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) + option. 
# informational: requirements for nodes requirements: - - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" - - python-xml - - rpm -''' + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install nmap community.general.zypper: name: nmap @@ -228,7 +226,7 @@ EXAMPLES = ''' state: present environment: ZYPP_LOCK_TIMEOUT: 20 -''' +""" import os.path import xml diff --git a/plugins/modules/zypper_repository.py b/plugins/modules/zypper_repository.py index 5a0356cc37..18f9ff0824 100644 --- a/plugins/modules/zypper_repository.py +++ b/plugins/modules/zypper_repository.py @@ -11,91 +11,88 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zypper_repository author: "Matthias Vogelgesang (@matze)" short_description: Add and remove Zypper repositories description: - - Add or remove Zypper repositories on SUSE and openSUSE + - Add or remove Zypper repositories on SUSE and openSUSE. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - A name for the repository. Not required when adding repofiles. - type: str - repo: - description: - - URI of the repository or .repo file. Required when state=present. - type: str - state: - description: - - A source string state. - choices: [ "absent", "present" ] - default: "present" - type: str + name: description: - description: - - A description of the repository - type: str - disable_gpg_check: - description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if O(state=present). - - Needs zypper version >= 1.6.2. 
- type: bool - default: false - autorefresh: - description: - - Enable autorefresh of the repository. - type: bool - default: true - aliases: [ "refresh" ] - priority: - description: - - Set priority of repository. Packages will always be installed - from the repository with the smallest priority number. - - Needs zypper version >= 1.12.25. - type: int - overwrite_multiple: - description: - - Overwrite multiple repository entries, if repositories with both name and - URL already exist. - type: bool - default: false - auto_import_keys: - description: - - Automatically import the gpg signing key of the new or changed repository. - - Has an effect only if O(state=present). Has no effect on existing (unchanged) repositories or in combination with O(state=absent). - - Implies runrefresh. - - Only works with C(.repo) files if `name` is given explicitly. - type: bool - default: false - runrefresh: - description: - - Refresh the package list of the given repository. - - Can be used with repo=* to refresh all repositories. - type: bool - default: false - enabled: - description: - - Set repository to enabled (or disabled). - type: bool - default: true + - A name for the repository. Not required when adding repofiles. + type: str + repo: + description: + - URI of the repository or full path of a C(.repo) file. Required when O(state=present). + type: str + state: + description: + - Whether the repository should exist or not. + - A source string state. + choices: ["absent", "present"] + default: "present" + type: str + description: + description: + - A description of the repository. + type: str + disable_gpg_check: + description: + - Whether to disable GPG signature checking of all packages. Has an effect only if O(state=present). + - Needs C(zypper) version >= 1.6.2. + type: bool + default: false + autorefresh: + description: + - Enable autorefresh of the repository. + type: bool + default: true + aliases: ["refresh"] + priority: + description: + - Set priority of repository. 
Packages will always be installed from the repository with the smallest priority number. + - Needs C(zypper) version >= 1.12.25. + type: int + overwrite_multiple: + description: + - Overwrite multiple repository entries, if repositories with both name and URL already exist. + type: bool + default: false + auto_import_keys: + description: + - Automatically import the gpg signing key of the new or changed repository. + - Has an effect only if O(state=present). Has no effect on existing (unchanged) repositories or in combination with O(state=absent). + - Implies O(runrefresh). + - Only works with C(.repo) files if O(name) is given explicitly. + type: bool + default: false + runrefresh: + description: + - Refresh the package list of the given repository. + - Can be used with O(repo=*) to refresh all repositories. + type: bool + default: false + enabled: + description: + - Set repository to enabled (or disabled). + type: bool + default: true requirements: - - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" - - python-xml -''' + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add NVIDIA repository for graphics drivers community.general.zypper_repository: name: nvidia-repo @@ -128,7 +125,7 @@ EXAMPLES = ''' name: my_ci_repo state: present runrefresh: true -''' +""" import traceback diff --git a/plugins/modules/zypper_repository_info.py b/plugins/modules/zypper_repository_info.py index dab4b9bbe5..9512d32eed 100644 --- a/plugins/modules/zypper_repository_info.py +++ b/plugins/modules/zypper_repository_info.py @@ -10,72 +10,67 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zypper_repository_info author: "Tobias Zeuch (@TobiasZeuch181)" version_added: 10.0.0 short_description: List Zypper repositories description: - - List 
Zypper repositories on SUSE and openSUSE. + - List Zypper repositories on SUSE and openSUSE. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module requirements: - - "zypper >= 1.0 (included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0)" - - python-xml + - "zypper >= 1.0 (included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0)" + - python-xml notes: - - "For info about packages, use the module M(ansible.builtin.package_facts)." -''' + - "For info about packages, use the module M(ansible.builtin.package_facts)." +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List registered repositories and store in variable repositories community.general.zypper_repository_info: {} register: repodatalist -''' +""" -RETURN = ''' +RETURN = r""" repodatalist: - description: - - A list of repository descriptions like it is returned by the command C(zypper repos). - type: list - returned: always - elements: dict - contains: - alias: - description: The alias of the repository. - type: str - autorefresh: - description: Indicates, if autorefresh is enabled on the repository. - type: int - enabled: - description: indicates, if the repository is enabled - type: int - gpgcheck: - description: indicates, if the GPG signature of the repository meta data is checked - type: int - name: - description: the name of the repository - type: str - priority: - description: the priority of the repository - type: int - url: - description: The URL of the repository on the internet. 
- type: str - sample: [ - { - "alias": "SLE-Product-SLES", - "autorefresh": "1", - "enabled": "1", - "gpgcheck": "1", - "name": "SLE-Product-SLES", - "priority": "99", - "url": "http://repo:50000/repo/SUSE/Products/SLE-Product-SLES/15-SP2/x86_64/product" - } - ] -''' + description: + - A list of repository descriptions like it is returned by the command C(zypper repos). + type: list + returned: always + elements: dict + contains: + alias: + description: The alias of the repository. + type: str + sample: "SLE-Product-SLES" + autorefresh: + description: Indicates, if autorefresh is enabled on the repository. + type: int + sample: "1" + enabled: + description: Indicates, if the repository is enabled. + type: int + sample: "1" + gpgcheck: + description: Indicates, if the GPG signature of the repository meta data is checked. + type: int + sample: "1" + name: + description: The name of the repository. + type: str + sample: "SLE-Product-SLES" + priority: + description: The priority of the repository. + type: int + sample: "99" + url: + description: The URL of the repository on the internet. 
+ type: str + sample: "http://repo:50000/repo/SUSE/Products/SLE-Product-SLES/15-SP2/x86_64/product" +""" from ansible_collections.community.general.plugins.module_utils import deps From d05d067f3b84622c8c9db3dec667b56b16d05df6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 21:40:39 +1300 Subject: [PATCH 386/482] z mods adjust docs (#9306) z* modules: adjust docs (re-commiting with adjustment from PR) --- plugins/modules/zfs_facts.py | 76 ++++--------------- plugins/modules/znode.py | 134 ++++++++++++++++----------------- plugins/modules/zpool_facts.py | 134 ++++++++++++--------------------- 3 files changed, 128 insertions(+), 216 deletions(-) diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py index 25fd10099b..115e8e3e7a 100644 --- a/plugins/modules/zfs_facts.py +++ b/plugins/modules/zfs_facts.py @@ -73,83 +73,33 @@ EXAMPLES = r""" RETURN = r""" name: - description: ZFS dataset name + description: ZFS dataset name. returned: always type: str sample: rpool/var/spool parsable: - description: if parsable output should be provided in machine friendly format. + description: If parsable output should be provided in machine friendly format. returned: if O(parsable=True) type: bool sample: true recurse: - description: if we should recurse over ZFS dataset + description: If we should recurse over ZFS dataset. returned: if O(recurse=True) type: bool sample: true zfs_datasets: - description: ZFS dataset facts + description: ZFS dataset facts. 
returned: always type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": "none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } + sample: {"aclinherit": "restricted", "aclmode": "discard", "atime": "on", "available": "43.8G", "canmount": "on", "casesensitivity": "sensitive", + "checksum": "on", "compression": "off", "compressratio": "1.00x", "copies": "1", "creation": "Thu Jun 16 11:37 2016", "dedup": "off", "devices": "on", + "exec": "on", "filesystem_count": "none", "filesystem_limit": "none", "logbias": "latency", "logicalreferenced": "18.5K", "logicalused": "3.45G", + "mlslabel": "none", "mounted": "yes", "mountpoint": "/rpool", "name": "rpool", "nbmand": "off", "normalization": "none", 
"org.openindiana.caiman:install": + "ready", "primarycache": "all", "quota": "none", "readonly": "off", "recordsize": "128K", "redundant_metadata": "all", "refcompressratio": "1.00x", + "referenced": "29.5K", "refquota": "none", "refreservation": "none", "reservation": "none", "secondarycache": "all", "setuid": "on", "sharenfs": "off", + "sharesmb": "off", "snapdir": "hidden", "snapshot_count": "none", "snapshot_limit": "none", "sync": "standard", "type": "filesystem", "used": "4.41G", + "usedbychildren": "4.41G", "usedbydataset": "29.5K", "usedbyrefreservation": "0", "usedbysnapshots": "0", "utf8only": "off", "version": "5", + "vscan": "off", "written": "29.5K", "xattr": "on", "zoned": "off"} """ from collections import defaultdict diff --git a/plugins/modules/znode.py b/plugins/modules/znode.py index e8f7f1dc76..ca59704d12 100644 --- a/plugins/modules/znode.py +++ b/plugins/modules/znode.py @@ -8,83 +8,81 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: znode short_description: Create, delete, retrieve, and update znodes using ZooKeeper description: - - Create, delete, retrieve, and update znodes using ZooKeeper. + - Create, delete, retrieve, and update znodes using ZooKeeper. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none extends_documentation_fragment: - - community.general.attributes + - community.general.attributes options: - hosts: - description: - - A list of ZooKeeper servers (format '[server]:[port]'). - required: true - type: str - name: - description: - - The path of the znode. - required: true - type: str - value: - description: - - The value assigned to the znode. - type: str - op: - description: - - An operation to perform. Mutually exclusive with state. - choices: [ get, wait, list ] - type: str - state: - description: - - The state to enforce. Mutually exclusive with op. 
- choices: [ present, absent ] - type: str - timeout: - description: - - The amount of time to wait for a node to appear. - default: 300 - type: int - recursive: - description: - - Recursively delete node and all its children. - type: bool - default: false - auth_scheme: - description: - - 'Authentication scheme.' - choices: [ digest, sasl ] - type: str - default: "digest" - required: false - version_added: 5.8.0 - auth_credential: - description: - - The authentication credential value. Depends on O(auth_scheme). - - The format for O(auth_scheme=digest) is C(user:password), - and the format for O(auth_scheme=sasl) is C(user:password). - type: str - required: false - version_added: 5.8.0 - use_tls: - description: - - Using TLS/SSL or not. - type: bool - default: false - required: false - version_added: '6.5.0' + hosts: + description: + - A list of ZooKeeper servers (format V([server]:[port])). + required: true + type: str + name: + description: + - The path of the znode. + required: true + type: str + value: + description: + - The value assigned to the znode. + type: str + op: + description: + - An operation to perform. Mutually exclusive with state. + choices: [get, wait, list] + type: str + state: + description: + - The state to enforce. Mutually exclusive with op. + choices: [present, absent] + type: str + timeout: + description: + - The amount of time to wait for a node to appear. + default: 300 + type: int + recursive: + description: + - Recursively delete node and all its children. + type: bool + default: false + auth_scheme: + description: + - 'Authentication scheme.' + choices: [digest, sasl] + type: str + default: "digest" + required: false + version_added: 5.8.0 + auth_credential: + description: + - The authentication credential value. Depends on O(auth_scheme). + - The format for O(auth_scheme=digest) is C(user:password), and the format for O(auth_scheme=sasl) is C(user:password). 
+ type: str + required: false + version_added: 5.8.0 + use_tls: + description: + - Using TLS/SSL or not. + type: bool + default: false + required: false + version_added: '6.5.0' requirements: - - kazoo >= 2.1 + - kazoo >= 2.1 author: "Trey Perry (@treyperry)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating or updating a znode with a given value community.general.znode: hosts: 'localhost:2181' diff --git a/plugins/modules/zpool_facts.py b/plugins/modules/zpool_facts.py index 2477a920b0..e0b87b570c 100644 --- a/plugins/modules/zpool_facts.py +++ b/plugins/modules/zpool_facts.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zpool_facts short_description: Gather facts about ZFS pools description: @@ -21,29 +20,28 @@ extends_documentation_fragment: - community.general.attributes.facts - community.general.attributes.facts_module options: - name: - description: - - ZFS pool name. - type: str - aliases: [ "pool", "zpool" ] - required: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: false - required: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zpool(1M) man page. - type: str - default: all - required: false -''' + name: + description: + - ZFS pool name. + type: str + aliases: ["pool", "zpool"] + required: false + parsable: + description: + - Specifies if property values should be displayed in machine friendly format. + type: bool + default: false + required: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zpool(1M) + man page. 
+ type: str + default: all + required: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather facts about ZFS pool rpool community.general.zpool_facts: pool=rpool @@ -54,71 +52,37 @@ EXAMPLES = ''' ansible.builtin.debug: msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' with_items: '{{ ansible_zfs_pools }}' -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: - description: Dictionary containing all the detailed information about the ZFS pool facts - returned: always - type: complex - contains: - ansible_zfs_pools: - description: ZFS pool facts - returned: always - type: str - sample: - { - "allocated": "3.46G", - "altroot": "-", - "autoexpand": "off", - "autoreplace": "off", - "bootfs": "rpool/ROOT/openindiana", - "cachefile": "-", - "capacity": "6%", - "comment": "-", - "dedupditto": "0", - "dedupratio": "1.00x", - "delegation": "on", - "expandsize": "-", - "failmode": "wait", - "feature@async_destroy": "enabled", - "feature@bookmarks": "enabled", - "feature@edonr": "enabled", - "feature@embedded_data": "active", - "feature@empty_bpobj": "active", - "feature@enabled_txg": "active", - "feature@extensible_dataset": "enabled", - "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", - "feature@large_blocks": "enabled", - "feature@lz4_compress": "active", - "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", - "feature@skein": "enabled", - "feature@spacemap_histogram": "active", - "fragmentation": "3%", - "free": "46.3G", - "freeing": "0", - "guid": "15729052870819522408", - "health": "ONLINE", - "leaked": "0", - "listsnapshots": "off", - "name": "rpool", - "readonly": "off", - "size": "49.8G", - "version": "-" - } + description: Dictionary containing all the detailed information about the ZFS pool facts. + returned: always + type: complex + contains: + ansible_zfs_pools: + description: ZFS pool facts. 
+ returned: always + type: str + sample: {"allocated": "3.46G", "altroot": "-", "autoexpand": "off", "autoreplace": "off", "bootfs": "rpool/ROOT/openindiana", "cachefile": "-", + "capacity": "6%", "comment": "-", "dedupditto": "0", "dedupratio": "1.00x", "delegation": "on", "expandsize": "-", "failmode": "wait", + "feature@async_destroy": "enabled", "feature@bookmarks": "enabled", "feature@edonr": "enabled", "feature@embedded_data": "active", + "feature@empty_bpobj": "active", "feature@enabled_txg": "active", "feature@extensible_dataset": "enabled", "feature@filesystem_limits": "enabled", + "feature@hole_birth": "active", "feature@large_blocks": "enabled", "feature@lz4_compress": "active", "feature@multi_vdev_crash_dump": "enabled", + "feature@sha512": "enabled", "feature@skein": "enabled", "feature@spacemap_histogram": "active", "fragmentation": "3%", "free": "46.3G", + "freeing": "0", "guid": "15729052870819522408", "health": "ONLINE", "leaked": "0", "listsnapshots": "off", "name": "rpool", "readonly": "off", + "size": "49.8G", "version": "-"} name: - description: ZFS pool name - returned: always - type: str - sample: rpool + description: ZFS pool name. + returned: always + type: str + sample: rpool parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: true -''' + description: If parsable output should be provided in machine friendly format. 
+ returned: if O(parsable=true) + type: bool + sample: true +""" from collections import defaultdict From c7edf0a87b38fc8ef2058638efd9c2d5a0bdf492 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 22:02:12 +1300 Subject: [PATCH 387/482] become plugins: use f-strings (#9319) * become plugins: use f-strings * add changelog frag --- changelogs/fragments/9319-fstr-become-plugins.yml | 10 ++++++++++ plugins/become/doas.py | 4 ++-- plugins/become/dzdo.py | 8 ++++---- plugins/become/ksu.py | 2 +- plugins/become/machinectl.py | 2 +- plugins/become/pbrun.py | 4 ++-- plugins/become/pfexec.py | 2 +- plugins/become/pmrun.py | 2 +- plugins/become/sesu.py | 2 +- plugins/become/sudosu.py | 10 +++++----- 10 files changed, 28 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/9319-fstr-become-plugins.yml diff --git a/changelogs/fragments/9319-fstr-become-plugins.yml b/changelogs/fragments/9319-fstr-become-plugins.yml new file mode 100644 index 0000000000..dcdc4b3f52 --- /dev/null +++ b/changelogs/fragments/9319-fstr-become-plugins.yml @@ -0,0 +1,10 @@ +minor_changes: + - doas become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - dzdo become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - ksu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - machinectl become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - pbrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). 
+ - pfexec become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - pmrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - sesu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). + - sudosu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 761e5e1e95..9d3a73b5b0 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -125,9 +125,9 @@ class BecomeModule(BecomeBase): flags += ' -n' become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' success_cmd = self._build_success_command(cmd, shell, noexe=True) executable = getattr(shell, 'executable', shell.SHELL_FAMILY) - return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd) + return f'{become_exe} {flags} {user} {executable} -c {success_cmd}' diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index d94c684d1f..323d1b7395 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -92,10 +92,10 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') if self.get_option('become_pass'): - self.prompt = '[dzdo via ansible, key=%s] password:' % self._id - flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt) + self.prompt = f'[dzdo via ansible, key={self._id}] password:' + flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\"" become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' - return ' '.join([becomecmd, flags, user, 
self._build_success_command(cmd, shell)]) + return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index 2be1832dc2..8ee8f89512 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -124,4 +124,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell)) + return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} ' diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index a0467c2c36..1dd648e6e0 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -123,7 +123,7 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}' def check_success(self, b_output): b_output = self.remove_ansi_codes(b_output) diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index 8a96b75797..ccba615a85 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -103,7 +103,7 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' noexe = not self.get_option('wrap_exe') - return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)]) + return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}" diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index d48d622713..d3b98a6317 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -106,4 +106,4 @@ class BecomeModule(BecomeBase): 
flags = self.get_option('become_flags') noexe = not self.get_option('wrap_exe') - return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe)) + return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}' diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 908c5e759d..093906214d 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -78,4 +78,4 @@ class BecomeModule(BecomeBase): become = self.get_option('become_exe') flags = self.get_option('become_flags') - return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell))) + return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}' diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 4dcb837e70..08dfdfca54 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -93,4 +93,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}' diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 5454fd2316..5dae85a192 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -98,16 +98,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') or '' prompt = '' if self.get_option('become_pass'): - self.prompt = '[sudo via ansible, key=%s] password:' % self._id + self.prompt = f'[sudo via ansible, key={self._id}] password:' if flags: # this could be simplified, but kept as is for now for backwards string matching flags = flags.replace('-n', '') - prompt = '-p "%s"' % (self.prompt) + prompt = f'-p "{self.prompt}"' user = self.get_option('become_user') or '' if user: - user = '%s' % (user) + user = f'{user}' if self.get_option('alt_method'): - return ' '.join([becomecmd, flags, prompt, "su -l", user, "-c", 
self._build_success_command(cmd, shell, True)]) + return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}" else: - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}" From cb2cd00cd17492207aad2280b40455fe0aa5a034 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 22:02:58 +1300 Subject: [PATCH 388/482] cache plugins: use f-strings (#9320) * cache plugins: use f-strings * add changelog frag --- changelogs/fragments/9320-fstr-cache-plugins.yml | 3 +++ plugins/cache/memcached.py | 2 +- plugins/cache/redis.py | 6 +++--- 3 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/9320-fstr-cache-plugins.yml diff --git a/changelogs/fragments/9320-fstr-cache-plugins.yml b/changelogs/fragments/9320-fstr-cache-plugins.yml new file mode 100644 index 0000000000..cc1aa6ea2e --- /dev/null +++ b/changelogs/fragments/9320-fstr-cache-plugins.yml @@ -0,0 +1,3 @@ +minor_changes: + - memcached cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320). + - redis cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320). 
diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 93131172c5..a70c3cb121 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -191,7 +191,7 @@ class CacheModule(BaseCacheModule): self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or []) def _make_key(self, key): - return "{0}{1}".format(self._prefix, key) + return f"{self._prefix}{key}" def _expire_keys(self): if self._timeout > 0: diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index f96aafaa84..aa0243b9dd 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -131,7 +131,7 @@ class CacheModule(BaseCacheModule): connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) - display.vv('Redis connection: %s' % self._db) + display.vv(f'Redis connection: {self._db}') @staticmethod def _parse_connection(re_patt, uri): @@ -164,12 +164,12 @@ class CacheModule(BaseCacheModule): pass # password is optional sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] - display.vv('\nUsing redis sentinels: %s' % sentinels) + display.vv(f'\nUsing redis sentinels: {sentinels}') scon = Sentinel(sentinels, **kw) try: return scon.master_for(self._sentinel_service_name, socket_timeout=0.2) except Exception as exc: - raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc)) + raise AnsibleError(f'Could not connect to redis sentinel: {to_native(exc)}') def _make_key(self, key): return self._prefix + key From 1d8f0b294216971fd596c1582990e888b33e76c0 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 23:02:30 +1300 Subject: [PATCH 389/482] inventory plugins: use f-strings (#9323) * inventory plugins: use f-strings * add changelog frag --- .../fragments/9323-fstr-inventory-plugins.yml | 14 ++++ plugins/inventory/cobbler.py | 40 +++++----- plugins/inventory/gitlab_runners.py | 2 +- 
plugins/inventory/icinga2.py | 28 +++---- plugins/inventory/linode.py | 2 +- plugins/inventory/lxd.py | 78 +++++++++---------- plugins/inventory/nmap.py | 8 +- plugins/inventory/online.py | 6 +- plugins/inventory/opennebula.py | 8 +- plugins/inventory/proxmox.py | 56 +++++++------ plugins/inventory/scaleway.py | 4 +- plugins/inventory/stackpath_compute.py | 22 +++--- plugins/inventory/virtualbox.py | 2 +- plugins/inventory/xen_orchestra.py | 27 +++---- 14 files changed, 148 insertions(+), 149 deletions(-) create mode 100644 changelogs/fragments/9323-fstr-inventory-plugins.yml diff --git a/changelogs/fragments/9323-fstr-inventory-plugins.yml b/changelogs/fragments/9323-fstr-inventory-plugins.yml new file mode 100644 index 0000000000..03ded1f0ec --- /dev/null +++ b/changelogs/fragments/9323-fstr-inventory-plugins.yml @@ -0,0 +1,14 @@ +minor_changes: + - cobbler inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - gitlab_runners inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - icinga2 inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - linode inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - lxd inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - nmap inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - online inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). 
+ - opennebula inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - proxmox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - scaleway inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - stackpath_compute inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - virtualbox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). + - xen_orchestra inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 664380da8f..ef88dae18c 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -160,7 +160,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): raise AnsibleError('Could not import xmlrpc client library') if self.connection is None: - self.display.vvvv('Connecting to %s\n' % self.cobbler_url) + self.display.vvvv(f'Connecting to {self.cobbler_url}\n') self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True) self.token = None if self.get_option('user') is not None: @@ -211,7 +211,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): return self._cache[self.cache_key]['systems'] def _add_safe_group_name(self, group, child=None): - group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", "")))) + group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}")) if child is 
not None: self.inventory.add_child(group_name, child) return group_name @@ -243,16 +243,16 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): for profile in self._get_profiles(): if profile['parent']: - self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent'])) + self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n") if not self._exclude_profile(profile['parent']): parent_group_name = self._add_safe_group_name(profile['parent']) - self.display.vvvv('Added profile parent group %s\n' % parent_group_name) + self.display.vvvv(f'Added profile parent group {parent_group_name}\n') if not self._exclude_profile(profile['name']): group_name = self._add_safe_group_name(profile['name']) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group {group_name}\n') self.inventory.add_child(parent_group_name, group_name) else: - self.display.vvvv('Processing profile %s without parent\n' % profile['name']) + self.display.vvvv(f"Processing profile {profile['name']} without parent\n") # Create a hierarchy of profile names profile_elements = profile['name'].split('-') i = 0 @@ -260,12 +260,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): profile_group = '-'.join(profile_elements[0:i + 1]) profile_group_child = '-'.join(profile_elements[0:i + 2]) if self._exclude_profile(profile_group): - self.display.vvvv('Excluding profile %s\n' % profile_group) + self.display.vvvv(f'Excluding profile {profile_group}\n') break group_name = self._add_safe_group_name(profile_group) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group {group_name}\n') child_group_name = self._add_safe_group_name(profile_group_child) - self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name)) + self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n') 
self.inventory.add_child(group_name, child_group_name) i = i + 1 @@ -273,7 +273,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.group = to_safe_group_name(self.get_option('group')) if self.group is not None and self.group != '': self.inventory.add_group(self.group) - self.display.vvvv('Added site group %s\n' % self.group) + self.display.vvvv(f'Added site group {self.group}\n') ip_addresses = {} ipv6_addresses = {} @@ -286,14 +286,14 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): interfaces = host['interfaces'] if set(host['mgmt_classes']) & set(self.include_mgmt_classes): - self.display.vvvv('Including host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes'])) + self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") else: if self._exclude_profile(host['profile']): - self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile'])) + self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n") continue if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes): - self.display.vvvv('Excluding host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes'])) + self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") continue # hostname is often empty for non-static IP hosts @@ -303,21 +303,21 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": hostname = make_unsafe(this_dns_name) - self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname)) + self.display.vvvv(f'Set hostname to {hostname} from {iname}\n') if hostname == '': - self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name']) + self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n") continue self.inventory.add_host(hostname) - self.display.vvvv('Added host %s hostname %s\n' % 
(host['name'], hostname)) + self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n") # Add host to profile group if host['profile'] != '': group_name = self._add_safe_group_name(host['profile'], child=hostname) - self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name)) + self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n') else: - self.display.warning('Host %s has an empty profile\n' % (hostname)) + self.display.warning(f'Host {hostname} has an empty profile\n') # Add host to groups specified by group_by fields for group_by in self.group_by: @@ -327,7 +327,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] for group in groups: group_name = self._add_safe_group_name(group, child=hostname) - self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name)) + self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n') # Add to group for this inventory if self.group is not None: @@ -377,7 +377,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): try: self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host)) except ValueError as e: - self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e))) + self.display.warning(f"Could not set host info for {hostname}: {to_text(e)}") if self.get_option('want_ip_addresses'): self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses)) diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index bd29e8d310..a5f53b8b14 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -124,7 +124,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Create groups based on variable values and add the corresponding hosts to it self._add_host_to_keyed_groups(self.get_option('keyed_groups'), 
host_attrs, host, strict=strict) except Exception as e: - raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e)) + raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {to_native(e)}') def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index d1f2bc617f..527a329173 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -141,7 +141,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): 'User-Agent': "ansible-icinga2-inv", 'Accept': "application/json", } - api_status_url = self.icinga2_url + "/status" + api_status_url = f"{self.icinga2_url}/status" request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -151,7 +151,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): open_url(api_status_url, **request_args) def _post_request(self, request_url, data=None): - self.display.vvv("Requested URL: %s" % request_url) + self.display.vvv(f"Requested URL: {request_url}") request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -160,42 +160,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable): } if data is not None: request_args['data'] = json.dumps(data) - self.display.vvv("Request Args: %s" % request_args) + self.display.vvv(f"Request Args: {request_args}") try: response = open_url(request_url, **request_args) except HTTPError as e: try: error_body = json.loads(e.read().decode()) - self.display.vvv("Error returned: {0}".format(error_body)) + self.display.vvv(f"Error returned: {error_body}") except Exception: error_body = {"status": None} if e.code == 404 and error_body.get('status') == "No objects found.": raise AnsibleParserError("Host filter returned no data. 
Please confirm your host_filter value is valid") - raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body)) + raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() json_data = json.loads(response_body.decode('utf-8')) - self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True)) + self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}") if 200 <= response.status <= 299: return json_data if response.status == 404 and json_data['status'] == "No objects found.": raise AnsibleParserError( - "API returned no data -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API returned no data -- Response: {response.status} - {json_data['status']}") if response.status == 401: raise AnsibleParserError( - "API was unable to complete query -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API was unable to complete query -- Response: {response.status} - {json_data['status']}") if response.status == 500: raise AnsibleParserError( - "API Response - %s - %s" - % (json_data['status'], json_data['errors'])) + f"API Response - {json_data['status']} - {json_data['errors']}") raise AnsibleParserError( - "Unexpected data returned - %s - %s" - % (json_data['status'], json_data['errors'])) + f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): - query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + query_hosts_url = f"{self.icinga2_url}/objects/hosts" self.headers['X-HTTP-Method-Override'] = 'GET' data_dict = dict() if hosts: @@ -302,7 +298,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if self.templar.is_template(self.icinga2_password): self.icinga2_password = self.templar.template(variable=self.icinga2_password, disable_lookups=False) - self.icinga2_url = self.icinga2_url.rstrip('/') + 
'/v1' + self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1" # Not currently enabled # self.cache_key = self.get_cache_key(path) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 5c9a4718f5..46f2faeace 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -161,7 +161,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self.instances = self.client.linode.instances() except LinodeApiError as exception: - raise AnsibleError('Linode client raised: %s' % exception) + raise AnsibleError(f'Linode client raised: {exception}') def _add_groups(self): """Add Linode instance groups to the dynamic inventory.""" diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 9ae004f6c5..1e135f7415 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -211,7 +211,7 @@ class InventoryModule(BaseInventoryPlugin): with open(path, 'r') as json_file: return json.load(json_file) except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err))) + raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {to_native(err)}') def save_json_data(self, path, file_name=None): """save data as json @@ -241,7 +241,7 @@ class InventoryModule(BaseInventoryPlugin): with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: json.dump(self.data, json_file) except IOError as err: - raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) + raise AnsibleParserError(f'Could not save data: {to_native(err)}') def verify_file(self, path): """Check the config @@ -281,7 +281,7 @@ class InventoryModule(BaseInventoryPlugin): if not isinstance(url, str): return False if not url.startswith(('unix:', 'https:')): - raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) + raise AnsibleError(f'URL is malformed: {to_native(url)}') return True def 
_connect_to_socket(self): @@ -306,7 +306,7 @@ class InventoryModule(BaseInventoryPlugin): return socket_connection except LXDClientException as err: error_storage[url] = err - raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) + raise AnsibleError(f'No connection to the socket: {to_native(error_storage)}') def _get_networks(self): """Get Networknames @@ -355,7 +355,7 @@ class InventoryModule(BaseInventoryPlugin): # } url = '/1.0/instances' if self.project: - url = url + '?{0}'.format(urlencode(dict(project=self.project))) + url = f"{url}?{urlencode(dict(project=self.project))}" instances = self.socket.do('GET', url) @@ -383,10 +383,10 @@ class InventoryModule(BaseInventoryPlugin): config = {} if isinstance(branch, (tuple, list)): config[name] = {branch[1]: self.socket.do( - 'GET', '/1.0/{0}/{1}/{2}?{3}'.format(to_native(branch[0]), to_native(name), to_native(branch[1]), urlencode(dict(project=self.project))))} + 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')} else: config[name] = {branch: self.socket.do( - 'GET', '/1.0/{0}/{1}?{2}'.format(to_native(branch), to_native(name), urlencode(dict(project=self.project))))} + 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')} return config def get_instance_data(self, names): @@ -449,7 +449,7 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(network_configuration): network config""" - instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network') network_configuration = None if instance_network_interfaces: network_configuration = {} @@ -462,7 +462,7 @@ class InventoryModule(BaseInventoryPlugin): address_set['family'] = address.get('family') address_set['address'] = address.get('address') 
address_set['netmask'] = address.get('netmask') - address_set['combined'] = address.get('address') + '/' + address.get('netmask') + address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}" network_configuration[interface_name].append(address_set) return network_configuration @@ -479,7 +479,7 @@ class InventoryModule(BaseInventoryPlugin): None Returns: str(prefered_interface): None or interface name""" - instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') prefered_interface = None # init if instance_network_interfaces: # instance have network interfaces # generator if interfaces which start with the desired pattern @@ -516,7 +516,7 @@ class InventoryModule(BaseInventoryPlugin): # "network":"lxdbr0", # "type":"nic"}, vlan_ids = {} - devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name))) + devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices') for device in devices: if 'network' in devices[device]: if devices[device]['network'] in network_vlans: @@ -579,7 +579,7 @@ class InventoryModule(BaseInventoryPlugin): else: path[instance_name][key] = value except KeyError as err: - raise AnsibleParserError("Unable to store Information: {0}".format(to_native(err))) + raise AnsibleParserError(f"Unable to store Information: {to_native(err)}") def extract_information_from_instance_configs(self): """Process configuration information @@ -600,24 +600,24 @@ class InventoryModule(BaseInventoryPlugin): for instance_name in self.data['instances']: self._set_data_entry(instance_name, 'os', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.os'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.os')) self._set_data_entry(instance_name, 'release', 
self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.release'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.release')) self._set_data_entry(instance_name, 'version', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.version'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.version')) self._set_data_entry(instance_name, 'profile', self._get_data_entry( - 'instances/{0}/instances/metadata/profiles'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/profiles')) self._set_data_entry(instance_name, 'location', self._get_data_entry( - 'instances/{0}/instances/metadata/location'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/location')) self._set_data_entry(instance_name, 'state', self._get_data_entry( - 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power')) self._set_data_entry(instance_name, 'type', self._get_data_entry( - 'instances/{0}/instances/metadata/type'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/type')) self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name)) self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name)) self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name)) self._set_data_entry(instance_name, 'project', self._get_data_entry( - 'instances/{0}/instances/metadata/project'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/project')) def build_inventory_network(self, instance_name): """Add the network interfaces of the instance to the inventory @@ -651,18 +651,18 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(interface_name: ip)""" - 
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None + prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None prefered_instance_network_family = self.prefered_instance_network_family ip_address = '' if prefered_interface: - interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface)) + interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}') for config in interface: if config['family'] == prefered_instance_network_family: ip_address = config['address'] break else: - interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') for interface in interfaces.values(): for config in interface: if config['family'] == prefered_instance_network_family: @@ -670,7 +670,7 @@ class InventoryModule(BaseInventoryPlugin): break return ip_address - if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces + if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance have network interfaces self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name))) else: @@ -691,7 +691,7 @@ class InventoryModule(BaseInventoryPlugin): Returns: None""" for instance_name in self.data['inventory']: - instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower() + instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower() # Only consider instances that match the "state" filter, if self.state is not None if self.filter: @@ -703,34 +703,34 @@ class InventoryModule(BaseInventoryPlugin): # add 
network information self.build_inventory_network(instance_name) # add os - v = self._get_data_entry('inventory/{0}/os'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/os') if v: self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower())) # add release - v = self._get_data_entry('inventory/{0}/release'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/release') if v: self.inventory.set_variable( instance_name, 'ansible_lxd_release', make_unsafe(v.lower())) # add profile self.inventory.set_variable( - instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry('inventory/{0}/profile'.format(instance_name)))) + instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile'))) # add state self.inventory.set_variable( instance_name, 'ansible_lxd_state', make_unsafe(instance_state)) # add type self.inventory.set_variable( - instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry('inventory/{0}/type'.format(instance_name)))) + instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type'))) # add location information - if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None' + if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None' self.inventory.set_variable( - instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry('inventory/{0}/location'.format(instance_name)))) + instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location'))) # add VLAN_ID information - if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)): + if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'): self.inventory.set_variable( - instance_name, 'ansible_lxd_vlan_ids', 
make_unsafe(self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))) + instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids'))) # add project self.inventory.set_variable( - instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry('inventory/{0}/project'.format(instance_name)))) + instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project'))) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -792,7 +792,7 @@ class InventoryModule(BaseInventoryPlugin): network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) except ValueError as err: raise AnsibleParserError( - 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) + f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {to_native(err)}") for instance_name in self.inventory.hosts: if self.data['inventory'][instance_name].get('network_interfaces') is not None: @@ -997,12 +997,12 @@ class InventoryModule(BaseInventoryPlugin): elif self.groupby[group_name].get('type') == 'project': self.build_inventory_groups_project(group_name) else: - raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) + raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}') if self.groupby: for group_name in self.groupby: if not group_name.isalnum(): - raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) + raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}') group_type(make_unsafe(group_name)) def build_inventory(self): @@ -1039,7 +1039,7 @@ class InventoryModule(BaseInventoryPlugin): None""" iter_keys = list(self.data['instances'].keys()) for instance_name in iter_keys: - if 
self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter: + if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter: del self.data['instances'][instance_name] def _populate(self): @@ -1120,6 +1120,6 @@ class InventoryModule(BaseInventoryPlugin): self.url = self.get_option('url') except Exception as err: raise AnsibleParserError( - 'All correct options required: {0}'.format(to_native(err))) + f'All correct options required: {to_native(err)}') # Call our internal helper to populate the dynamic inventory self._populate() diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 48f02c446b..fbc8a76173 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -178,7 +178,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self._nmap = get_bin_path('nmap') except ValueError as e: - raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) + raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {to_native(e)}') super(InventoryModule, self).parse(inventory, loader, path, cache=cache) @@ -248,7 +248,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): p = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) + raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}') # parse results host = None @@ -259,7 +259,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: t_stdout = to_text(stdout, errors='surrogate_or_strict') except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + raise AnsibleParserError(f'Invalid (non unicode) input returned: {to_native(e)}') for line in 
t_stdout.splitlines(): hits = self.find_host.match(line) @@ -300,7 +300,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): results[-1]['ports'] = ports except Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + raise AnsibleParserError(f"failed to parse {to_native(path)}: {to_native(e)} ") if cache_needs_update: self._cache[cache_key] = results diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 70b8d14192..9475049c08 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -138,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin): try: response = open_url(url, headers=self.headers) except Exception as e: - self.display.warning("An error happened while fetching: %s" % url) + self.display.warning(f"An error happened while fetching: {url}") return None try: @@ -245,8 +245,8 @@ class InventoryModule(BaseInventoryPlugin): } self.headers = { - 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), + 'Authorization': f"Bearer {token}", + 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}", 'Content-type': 'application/json' } diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 077d3da5a3..2750d4d370 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -128,9 +128,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): authstring = fp.read().rstrip() username, password = authstring.split(":") except (OSError, IOError): - raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'") except Exception: - raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'") 
auth_params = namedtuple('auth', ('url', 'username', 'password')) @@ -166,13 +166,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not (auth.username and auth.password): raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.') else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}") # get hosts (VMs) try: vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3) except Exception as e: - raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e))) + raise AnsibleError(f"Something happened during XML-RPC call: {to_native(e)}") return vm_pool diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 38877b895c..e4a39df3d7 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -284,12 +284,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if self.proxmox_password: credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) + ret = a.post(f'{self.proxmox_url}/api2/json/access/ticket', data=credentials) json = ret.json() self.headers = { # only required for POST/PUT/DELETE methods, which we are not using currently # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket']) + 'Cookie': f"PVEAuthCookie={json['data']['ticket']}" } else: # Clean and format token components @@ -341,23 +341,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return make_unsafe(self._cache[self.cache_key][url]) def _get_nodes(self): - return self._get_json("%s/api2/json/nodes" % self.proxmox_url) + return self._get_json(f"{self.proxmox_url}/api2/json/nodes") def _get_pools(self): - return 
self._get_json("%s/api2/json/pools" % self.proxmox_url) + return self._get_json(f"{self.proxmox_url}/api2/json/pools") def _get_lxc_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node)) + return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc") def _get_qemu_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node)) + return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/qemu") def _get_members_per_pool(self, pool): - ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool)) + ret = self._get_json(f"{self.proxmox_url}/api2/json/pools/{pool}") return ret['members'] def _get_node_ip(self, node): - ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node)) + ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/network") for iface in ret: try: @@ -371,7 +371,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if status_key not in properties or not properties[status_key] == 'running': return - ret = self._get_json("%s/api2/json/nodes/%s/lxc/%s/interfaces" % (self.proxmox_url, node, vmid), ignore_errors=[501]) + ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc/{vmid}/interfaces", ignore_errors=[501]) if not ret: return @@ -398,9 +398,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: ifaces = self._get_json( - "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( - self.proxmox_url, node, vmtype, vmid - ) + f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/agent/network-get-interfaces" )['result'] if "error" in ifaces: @@ -418,7 +416,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): result.append({ 'name': iface['name'], 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in 
iface['ip-addresses']] if 'ip-addresses' in iface else [] + 'ip-addresses': [f"{ip['ip-address']}/{ip['prefix']}" for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] }) except requests.HTTPError: pass @@ -426,7 +424,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return result def _get_vm_config(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) + ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/config") properties[self._fact('node')] = node properties[self._fact('vmid')] = vmid @@ -442,13 +440,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: # fixup disk images as they have no key if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = ('disk_image=' + value) + value = f"disk_image={value}" # Additional field containing parsed tags as list if config == 'tags': stripped_value = value.strip() if stripped_value: - parsed_key = key + "_parsed" + parsed_key = f"{key}_parsed" properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")] # The first field in the agent string tells you whether the agent is enabled @@ -464,7 +462,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if agent_enabled: agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) if agent_iface_value: - agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) + agent_iface_key = self.to_safe(f'{key}_interfaces') properties[agent_iface_key] = agent_iface_value if config == 'lxc': @@ -489,13 +487,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return None def _get_vm_status(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid)) + ret = 
self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/status/current") properties[self._fact('status')] = ret['status'] if vmtype == 'qemu': properties[self._fact('qmpstatus')] = ret['qmpstatus'] def _get_vm_snapshots(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) + ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/snapshot") snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] properties[self._fact('snapshots')] = snapshots @@ -509,11 +507,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _fact(self, name): '''Generate a fact's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.facts_prefix, name.lower())) + return self.to_safe(f'{self.facts_prefix}{name.lower()}') def _group(self, name): '''Generate a group's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.group_prefix, name.lower())) + return self.to_safe(f'{self.group_prefix}{name.lower()}') def _can_add_host(self, name, properties): '''Ensure that a host satisfies all defined hosts filters. 
If strict mode is @@ -525,7 +523,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not self._compose(host_filter, properties): return False except Exception as e: # pylint: disable=broad-except - message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e)) + message = f"Could not evaluate host filter {host_filter} for host {name} - {to_native(e)}" if self.strict: raise AnsibleError(message) display.warning(message) @@ -566,8 +564,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # add the host to the inventory self._add_host(name, properties) - node_type_group = self._group('%s_%s' % (node, ittype)) - self.inventory.add_child(self._group('all_' + ittype), name) + node_type_group = self._group(f'{node}_{ittype}') + self.inventory.add_child(self._group(f"all_{ittype}"), name) self.inventory.add_child(node_type_group, name) item_status = item['status'] @@ -575,7 +573,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'): # get more details about the status of the qemu VM item_status = properties.get(self._fact('qmpstatus'), item_status) - self.inventory.add_child(self._group('all_%s' % (item_status, )), name) + self.inventory.add_child(self._group(f'all_{item_status}'), name) return name @@ -586,7 +584,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): poolid = pool.get('poolid') if not poolid: continue - pool_group = self._group('pool_' + poolid) + pool_group = self._group(f"pool_{poolid}") self.inventory.add_group(pool_group) for member in self._get_members_per_pool(poolid): @@ -603,7 +601,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): default_groups.extend(['prelaunch', 'paused']) for group in default_groups: - self.inventory.add_group(self._group('all_%s' % (group))) + self.inventory.add_group(self._group(f'all_{group}')) 
nodes_group = self._group('nodes') if not self.exclude_nodes: self.inventory.add_group(nodes_group) @@ -636,7 +634,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # add LXC/Qemu groups for the node for ittype in ('lxc', 'qemu'): - node_type_group = self._group('%s_%s' % (node['node'], ittype)) + node_type_group = self._group(f"{node['node']}_{ittype}") self.inventory.add_group(node_type_group) # get LXC containers and Qemu VMs for this node @@ -665,7 +663,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): v = self.get_option(o) if self.templar.is_template(v): v = self.templar.template(v, disable_lookups=False) - setattr(self, 'proxmox_%s' % o, v) + setattr(self, f'proxmox_{o}', v) # some more cleanup and validation self.proxmox_url = self.proxmox_url.rstrip('/') diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 4205caeca7..7fc5f12c44 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -140,7 +140,7 @@ def _fetch_information(token, url): headers={'X-Auth-Token': token, 'Content-type': 'application/json'}) except Exception as e: - raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) + raise AnsibleError(f"Error while fetching {url}: {to_native(e)}") try: raw_json = json.loads(to_text(response.read())) except ValueError: @@ -161,7 +161,7 @@ def _fetch_information(token, url): def _build_server_url(api_endpoint): - return "/".join([api_endpoint, "servers"]) + return f"{api_endpoint}/servers" def extract_public_ipv4(server_info): diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 8508b4e797..c87d0e5277 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -139,7 +139,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): "Content-Type": "application/json", } resp = open_url( - self.api_host + '/identity/v1/oauth2/token', + 
f"{self.api_host}/identity/v1/oauth2/token", headers=headers, data=payload, method="POST" @@ -155,16 +155,16 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._authenticate() for stack_slug in self.stack_slugs: try: - workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads') + workloads = self._stackpath_query_get_list(f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads") except Exception: - raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc()) + raise AnsibleError(f"Failed to get workloads from the StackPath API: {traceback.format_exc()}") for workload in workloads: try: workload_instances = self._stackpath_query_get_list( - self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances' + f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads/{workload['id']}/instances" ) except Exception: - raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc()) + raise AnsibleError(f"Failed to get workload instances from the StackPath API: {traceback.format_exc()}") for instance in workload_instances: if instance["phase"] == "RUNNING": instance["stackSlug"] = stack_slug @@ -184,7 +184,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _populate(self, instances): for instance in instances: for group_key in self.group_keys: - group = group_key + "_" + instance[group_key] + group = f"{group_key}_{instance[group_key]}" group = group.lower().replace(" ", "_").replace("-", "_") self.inventory.add_group(group) self.inventory.add_host(instance[self.hostname_key], @@ -194,14 +194,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._authenticate() headers = { "Content-Type": "application/json", - "Authorization": "Bearer " + self.auth_token, + "Authorization": f"Bearer {self.auth_token}", } next_page = True 
result = [] cursor = '-1' while next_page: resp = open_url( - url + '?page_request.first=10&page_request.after=%s' % cursor, + f"{url}?page_request.first=10&page_request.after={cursor}", headers=headers, method="GET" ) @@ -251,10 +251,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self.stack_slugs = self.get_option('stack_slugs') if not self.stack_slugs: try: - stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks') + stacks = self._stackpath_query_get_list(f"{self.api_host}/stack/v1/stacks") self._get_stack_slugs(stacks) except Exception: - raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc()) + raise AnsibleError(f"Failed to get stack IDs from the Stackpath API: {traceback.format_exc()}") cache_key = self.get_cache_key(path) # false when refresh_cache or --flush-cache is used @@ -283,4 +283,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if cache_needs_update or (not cache and self.get_option('cache')): self._cache[cache_key] = results except Exception: - raise AnsibleError("Failed to populate data: %s" % traceback.format_exc()) + raise AnsibleError(f"Failed to populate data: {traceback.format_exc()}") diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index d48c294fd9..e6f401ca86 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -203,7 +203,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): else: # found vars, accumulate in hostvars for clean inventory set - pref_k = make_unsafe('vbox_' + k.strip().replace(' ', '_')) + pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}") leading_spaces = len(k) - len(k.lstrip(' ')) if 0 < leading_spaces <= 2: if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict): diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 4094af2468..0a050d0bf9 
100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -138,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE} self.conn = create_connection( - '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt) + f'{proto}://{xoa_api_host}/api/', sslopt=sslopt) CALL_TIMEOUT = 100 """Number of 1/10ths of a second to wait before method call times out.""" @@ -162,8 +162,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): sleep(0.1) waited += 1 - raise AnsibleError( - 'Method call {method} timed out after {timeout} seconds.'.format(method=method, timeout=self.CALL_TIMEOUT / 10)) + raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.') def login(self, user, password): result = self.call('session.signIn', { @@ -171,15 +170,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): }) if 'error' in result: - raise AnsibleError( - 'Could not connect: {0}'.format(result['error'])) + raise AnsibleError(f"Could not connect: {result['error']}") def get_object(self, name): answer = self.call('xo.getAllObjects', {'filter': {'type': name}}) if 'error' in answer: - raise AnsibleError( - 'Could not request: {0}'.format(answer['error'])) + raise AnsibleError(f"Could not request: {answer['error']}") return answer['result'] @@ -252,8 +249,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _add_hosts(self, hosts, pools): for host in hosts.values(): entry_name = host['uuid'] - group_name = 'xo_host_{0}'.format( - clean_group_name(host['name_label'])) + group_name = f"xo_host_{clean_group_name(host['name_label'])}" pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) self.inventory.add_group(group_name) @@ -276,15 +272,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): entry_name, 'product_brand', host['productBrand']) 
for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) def _add_pools(self, pools): for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) @@ -292,16 +286,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _pool_group_name_for_uuid(self, pools, pool_uuid): for pool in pools: if pool == pool_uuid: - return 'xo_pool_{0}'.format( - clean_group_name(pools[pool_uuid]['name_label'])) + return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}" # TODO: Refactor def _host_group_name_for_uuid(self, hosts, host_uuid): for host in hosts: if host == host_uuid: - return 'xo_host_{0}'.format( - clean_group_name(hosts[host_uuid]['name_label'] - )) + return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}" def _populate(self, objects): # Prepare general groups From 79bef1a14c09f652f0d2833cd0eea3251309c373 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 23 Dec 2024 23:21:25 +1300 Subject: [PATCH 390/482] action plugins: use f-strings (#9318) * action plugins: use f-strings * add changelog frag * adjustment from review --- .../fragments/9318-fstr-actionplugins.yml | 3 + plugins/action/iptables_state.py | 58 +++++++++-------- plugins/action/shutdown.py | 62 ++++++++----------- 3 files changed, 62 insertions(+), 61 deletions(-) create mode 100644 changelogs/fragments/9318-fstr-actionplugins.yml diff --git a/changelogs/fragments/9318-fstr-actionplugins.yml b/changelogs/fragments/9318-fstr-actionplugins.yml new file mode 100644 index 0000000000..7df54f3c19 --- /dev/null +++ b/changelogs/fragments/9318-fstr-actionplugins.yml @@ -0,0 +1,3 @@ +minor_changes: + - iptables_state action plugin 
- use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318). + - shutdown action plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318). diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py index 5ea55af58c..39ee85d778 100644 --- a/plugins/action/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -22,25 +22,33 @@ class ActionModule(ActionBase): _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) DEFAULT_SUDOABLE = True - MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = ( - "This module doesn't support async>0 and poll>0 when its 'state' param " - "is set to 'restored'. To enable its rollback feature (that needs the " - "module to run asynchronously on the remote), please set task attribute " - "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = ( - "Attempts to restore iptables state without rollback in case of mistake " - "may lead the ansible controller to loose access to the hosts and never " - "regain it before fixing firewall rules through a serial console, or any " - "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and " - "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) " - "(recommended).") - MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = ( - "You attempt to restore iptables state with rollback in case of mistake, " - "but with settings that will lead this rollback to happen AFTER that the " - "controller will reach its own timeout. 
Please set task attribute 'poll' " - "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") + @staticmethod + def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout): + return ( + "This module doesn't support async>0 and poll>0 when its 'state' param " + "is set to 'restored'. To enable its rollback feature (that needs the " + "module to run asynchronously on the remote), please set task attribute " + f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") + + @staticmethod + def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout): + return ( + "Attempts to restore iptables state without rollback in case of mistake " + "may lead the ansible controller to loose access to the hosts and never " + "regain it before fixing firewall rules through a serial console, or any " + f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and " + f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) " + "(recommended).") + + @staticmethod + def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout): + return ( + "You attempt to restore iptables state with rollback in case of mistake, " + "but with settings that will lead this rollback to happen AFTER that the " + "controller will reach its own timeout. 
Please set task attribute 'poll' " + f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") def _async_result(self, async_status_args, task_vars, timeout): ''' @@ -95,18 +103,18 @@ class ActionModule(ActionBase): if module_args.get('state', None) == 'restored': if not wrap_async: if not check_mode: - display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % ( + display.warning(self.msg_error__async_and_poll_not_zero( task_poll, task_async, max_timeout)) elif task_poll: - raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % ( + raise AnsibleActionFail(self.msg_warning__no_async_is_no_rollback( task_poll, task_async, max_timeout)) else: if task_async > max_timeout and not check_mode: - display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % ( + display.warning(self.msg_warning__async_greater_than_timeout( task_poll, task_async, max_timeout)) @@ -119,10 +127,10 @@ class ActionModule(ActionBase): # remote and local sides (if not the same, make the loop # longer on the controller); and set a backup file path. module_args['_timeout'] = task_async - module_args['_back'] = '%s/iptables.state' % async_dir + module_args['_back'] = f'{async_dir}/iptables.state' async_status_args = dict(mode='status') - confirm_cmd = 'rm -f %s' % module_args['_back'] - starter_cmd = 'touch %s.starter' % module_args['_back'] + confirm_cmd = f"rm -f {module_args['_back']}" + starter_cmd = f"touch {module_args['_back']}.starter" remaining_time = max(task_async, max_timeout) # do work! 
diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py index 01201a6405..9505cc2155 100644 --- a/plugins/action/shutdown.py +++ b/plugins/action/shutdown.py @@ -18,6 +18,10 @@ from ansible.utils.display import Display display = Display() +def fmt(mapping, key): + return to_native(mapping[key]).strip() + + class TimedOutException(Exception): pass @@ -84,31 +88,26 @@ class ActionModule(ActionBase): def get_distribution(self, task_vars): # FIXME: only execute the module if we don't already have the facts we need distribution = {} - display.debug('{action}: running setup module to get distribution'.format(action=self._task.action)) + display.debug(f'{self._task.action}: running setup module to get distribution') module_output = self._execute_module( task_vars=task_vars, module_name='ansible.legacy.setup', module_args={'gather_subset': 'min'}) try: if module_output.get('failed', False): - raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format( - to_native(module_output['module_stdout']).strip(), - to_native(module_output['module_stderr']).strip())) + raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}") distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() distribution['version'] = to_text( module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) - display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) + display.debug(f"{self._task.action}: distribution: {distribution}") return distribution except KeyError as ke: - raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0])) + raise AnsibleError(f'Failed to get distribution information. 
Missing "{ke.args[0]}" in output.') def get_shutdown_command(self, task_vars, distribution): def find_command(command, find_search_paths): - display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format( - action=self._task.action, - command=command, - paths=find_search_paths)) + display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"') find_result = self._execute_module( task_vars=task_vars, # prevent collection search by calling with ansible.legacy (still allows library/ override of find) @@ -130,42 +129,37 @@ class ActionModule(ActionBase): if is_string(search_paths): search_paths = [search_paths] - # Error if we didn't get a list - err_msg = "'search_paths' must be a string or flat list of strings, got {0}" try: incorrect_type = any(not is_string(x) for x in search_paths) if not isinstance(search_paths, list) or incorrect_type: raise TypeError except TypeError: - raise AnsibleError(err_msg.format(search_paths)) + # Error if we didn't get a list + err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}" + raise AnsibleError(err_msg) full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command if not full_path: # if we could not find the shutdown command - display.vvv('Unable to find command "{0}" in search paths: {1}, will attempt a shutdown using systemd ' - 'directly.'.format(shutdown_bin, search_paths)) # tell the user we will try with systemd + + # tell the user we will try with systemd + display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.') systemctl_search_paths = ['/bin', '/usr/bin'] full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command if not full_path: # if we couldn't find systemctl raise AnsibleError( - 'Could not find command "{0}" in search paths: {1} or 
systemctl command in search paths: {2}, unable to shutdown.'. - format(shutdown_bin, search_paths, systemctl_search_paths)) # we give up here + f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl' + f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here else: - return "{0} poweroff".format(full_path[0]) # done, since we cannot use args with systemd shutdown + return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown # systemd case taken care of, here we add args to the command args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') # Convert seconds to minutes. If less that 60, set it to 0. delay_sec = self.delay shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) - return '{0} {1}'. \ - format( - full_path[0], - args.format( - delay_sec=delay_sec, - delay_min=delay_sec // 60, - message=shutdown_message - ) - ) + + af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) + return f'{full_path[0]} {af}' def perform_shutdown(self, task_vars, distribution): result = {} @@ -174,9 +168,8 @@ class ActionModule(ActionBase): self.cleanup(force=True) try: - display.vvv("{action}: shutting down server...".format(action=self._task.action)) - display.debug("{action}: shutting down server with command '{command}'". 
- format(action=self._task.action, command=shutdown_command_exec)) + display.vvv(f"{self._task.action}: shutting down server...") + display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'") if self._play_context.check_mode: shutdown_result['rc'] = 0 else: @@ -184,16 +177,13 @@ class ActionModule(ActionBase): except AnsibleConnectionFailure as e: # If the connection is closed too quickly due to the system being shutdown, carry on display.debug( - '{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, - error=to_text(e))) + f'{self._task.action}: AnsibleConnectionFailure caught and handled: {to_text(e)}') shutdown_result['rc'] = 0 if shutdown_result['rc'] != 0: result['failed'] = True result['shutdown'] = False - result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format( - stdout=to_native(shutdown_result['stdout'].strip()), - stderr=to_native(shutdown_result['stderr'].strip())) + result['msg'] = f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" return result result['failed'] = False @@ -206,7 +196,7 @@ class ActionModule(ActionBase): # If running with local connection, fail so we don't shutdown ourself if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action) + msg = f'Running {self._task.action} with local connection would shutdown the control node.' 
return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} if task_vars is None: From da51594f9d626f0b3b0e62fcf2c3e0f857195eec Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 24 Dec 2024 00:21:59 +1300 Subject: [PATCH 391/482] qubes connection plugin: fix display stmt (#9334) * qubes connection plugin: fix display stmt * add changelog frag --- changelogs/fragments/9334-qubes-conn.yml | 2 ++ plugins/connection/qubes.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9334-qubes-conn.yml diff --git a/changelogs/fragments/9334-qubes-conn.yml b/changelogs/fragments/9334-qubes-conn.yml new file mode 100644 index 0000000000..3faa8d7981 --- /dev/null +++ b/changelogs/fragments/9334-qubes-conn.yml @@ -0,0 +1,2 @@ +bugfixes: + - qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334). diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index b54eeb3a84..8860fbb777 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -118,7 +118,7 @@ class Connection(ConnectionBase): rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr)) + display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr)) return rc, stdout, stderr def put_file(self, in_path, out_path): From 28f36ae25c233d294e266c250e8cf34e02445857 Mon Sep 17 00:00:00 2001 From: Chris Northwood Date: Mon, 23 Dec 2024 17:51:34 +0000 Subject: [PATCH 392/482] Add action group for keycloak (#9284) * Create group for keycloak This will allows keycloak authentication details to be set as a module_defaults rather than repeated on each task * add documentation to keycloak modules to note creation of action_group * add changelog for keycloak action_group creation * exclude keycloak_realm_info from action group, as it does not share same set of base parameters * 
fix formatting on changelog entry for adding Keycloak action group Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../9284-add-keycloak-action-group.yml | 2 ++ meta/runtime.yml | 28 +++++++++++++++++++ plugins/doc_fragments/keycloak.py | 10 +++++++ plugins/modules/keycloak_authentication.py | 3 ++ ...eycloak_authentication_required_actions.py | 3 ++ .../keycloak_authz_authorization_scope.py | 3 ++ .../modules/keycloak_authz_custom_policy.py | 3 ++ plugins/modules/keycloak_authz_permission.py | 3 ++ .../modules/keycloak_authz_permission_info.py | 5 ++++ plugins/modules/keycloak_client.py | 3 ++ .../modules/keycloak_client_rolemapping.py | 3 ++ plugins/modules/keycloak_client_rolescope.py | 3 ++ plugins/modules/keycloak_clientscope.py | 3 ++ plugins/modules/keycloak_clientscope_type.py | 3 ++ plugins/modules/keycloak_clientsecret_info.py | 5 ++++ .../keycloak_clientsecret_regenerate.py | 3 ++ plugins/modules/keycloak_clienttemplate.py | 3 ++ plugins/modules/keycloak_component.py | 3 ++ plugins/modules/keycloak_component_info.py | 6 ++++ plugins/modules/keycloak_group.py | 3 ++ plugins/modules/keycloak_identity_provider.py | 3 ++ plugins/modules/keycloak_realm.py | 3 ++ plugins/modules/keycloak_realm_key.py | 3 ++ .../keycloak_realm_keys_metadata_info.py | 5 ++++ plugins/modules/keycloak_realm_rolemapping.py | 3 ++ plugins/modules/keycloak_role.py | 3 ++ plugins/modules/keycloak_user.py | 3 ++ plugins/modules/keycloak_user_federation.py | 3 ++ plugins/modules/keycloak_user_rolemapping.py | 3 ++ plugins/modules/keycloak_userprofile.py | 3 ++ 30 files changed, 130 insertions(+) create mode 100644 changelogs/fragments/9284-add-keycloak-action-group.yml diff --git a/changelogs/fragments/9284-add-keycloak-action-group.yml b/changelogs/fragments/9284-add-keycloak-action-group.yml new file mode 100644 index 0000000000..b25c370346 --- /dev/null +++ b/changelogs/fragments/9284-add-keycloak-action-group.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak 
- add an action group for Keycloak modules to allow ``module_defaults`` to be set for Keycloak tasks (https://github.com/ansible-collections/community.general/pull/9284). diff --git a/meta/runtime.yml b/meta/runtime.yml index 387ab0f22b..5b3bb0b6f0 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -32,6 +32,34 @@ action_groups: - proxmox_template - proxmox_user_info - proxmox_vm_info + keycloak: + - keycloak_authentication + - keycloak_authentication_required_actions + - keycloak_authz_authorization_scope + - keycloak_authz_custom_policy + - keycloak_authz_permission + - keycloak_authz_permission_info + - keycloak_client + - keycloak_client_rolemapping + - keycloak_client_rolescope + - keycloak_clientscope + - keycloak_clientscope_type + - keycloak_clientsecret_info + - keycloak_clientsecret_regenerate + - keycloak_clienttemplate + - keycloak_component + - keycloak_component_info + - keycloak_group + - keycloak_identity_provider + - keycloak_realm + - keycloak_realm_key + - keycloak_realm_keys_metadata_info + - keycloak_realm_rolemapping + - keycloak_role + - keycloak_user + - keycloak_user_federation + - keycloak_user_rolemapping + - keycloak_userprofile plugin_routing: callback: actionable: diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index 9b21ce52c9..b64a23b088 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -77,3 +77,13 @@ options: default: Ansible version_added: 5.4.0 ''' + + ACTIONGROUP_KEYCLOAK = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.keycloak +""" diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py index 5945890bb7..fa2ae1e974 100644 --- a/plugins/modules/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -24,6 +24,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: realm: @@ -102,6 +104,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py index 5ffbd2033c..c992e05d2d 100644 --- a/plugins/modules/keycloak_authentication_required_actions.py +++ b/plugins/modules/keycloak_authentication_required_actions.py @@ -26,6 +26,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: realm: @@ -77,6 +79,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py index 5eef9ac765..146a37bf51 100644 --- a/plugins/modules/keycloak_authz_authorization_scope.py +++ b/plugins/modules/keycloak_authz_authorization_scope.py @@ -35,6 +35,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -74,6 +76,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py index 8363c252e2..2205a184f5 100644 --- a/plugins/modules/keycloak_authz_custom_policy.py 
+++ b/plugins/modules/keycloak_authz_custom_policy.py @@ -36,6 +36,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 10.2.0 options: state: @@ -71,6 +73,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py index ef81fb8c31..08b5ae6295 100644 --- a/plugins/modules/keycloak_authz_permission.py +++ b/plugins/modules/keycloak_authz_permission.py @@ -43,6 +43,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 10.2.0 options: state: @@ -121,6 +123,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py index 8b4e96b416..4c1a6ddf66 100644 --- a/plugins/modules/keycloak_authz_permission_info.py +++ b/plugins/modules/keycloak_authz_permission_info.py @@ -31,6 +31,10 @@ description: The Authorization Services paths and payloads have not officially been documented by the Keycloak project. 
U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/) +attributes: + action_group: + version_added: 10.2.0 + options: name: description: @@ -51,6 +55,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index 62015bc79f..c65105ec23 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -35,6 +35,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -595,6 +597,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py index be419904a7..ebc60b9fd0 100644 --- a/plugins/modules/keycloak_client_rolemapping.py +++ b/plugins/modules/keycloak_client_rolemapping.py @@ -38,6 +38,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -130,6 +132,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py index cca72f0ddd..cd4f0fa80e 100644 --- a/plugins/modules/keycloak_client_rolescope.py +++ b/plugins/modules/keycloak_client_rolescope.py @@ -34,6 +34,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -76,6 +78,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes 
author: diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index 35ac3d9500..87437b6df0 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -38,6 +38,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -158,6 +160,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py index 1fb9a0813c..d95ab0b44d 100644 --- a/plugins/modules/keycloak_clientscope_type.py +++ b/plugins/modules/keycloak_clientscope_type.py @@ -30,6 +30,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: realm: @@ -59,6 +61,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py index c772620351..58786a5364 100644 --- a/plugins/modules/keycloak_clientsecret_info.py +++ b/plugins/modules/keycloak_clientsecret_info.py @@ -32,6 +32,10 @@ description: - "Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to the task." 
+attributes: + action_group: + version_added: 10.2.0 + options: realm: type: str @@ -57,6 +61,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py index 7e8b295433..fc279f6a9d 100644 --- a/plugins/modules/keycloak_clientsecret_regenerate.py +++ b/plugins/modules/keycloak_clientsecret_regenerate.py @@ -37,6 +37,8 @@ attributes: support: full diff_mode: support: none + action_group: + version_added: 10.2.0 options: realm: @@ -63,6 +65,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py index 7bffb5cbb6..63bb32f972 100644 --- a/plugins/modules/keycloak_clienttemplate.py +++ b/plugins/modules/keycloak_clienttemplate.py @@ -33,6 +33,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -168,6 +170,7 @@ notes: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py index 375953c3e8..6c345700ae 100644 --- a/plugins/modules/keycloak_component.py +++ b/plugins/modules/keycloak_component.py @@ -32,6 +32,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -72,6 +74,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_component_info.py 
b/plugins/modules/keycloak_component_info.py index a788735d98..cc0c4d31d9 100644 --- a/plugins/modules/keycloak_component_info.py +++ b/plugins/modules/keycloak_component_info.py @@ -18,6 +18,11 @@ version_added: 8.2.0 description: - This module retrive information on component from Keycloak. + +attributes: + action_group: + version_added: 10.2.0 + options: realm: description: @@ -46,6 +51,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py index 5398a4b5d0..24564f2d4d 100644 --- a/plugins/modules/keycloak_group.py +++ b/plugins/modules/keycloak_group.py @@ -36,6 +36,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -114,6 +116,7 @@ notes: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index 609673653b..0ef67ee385 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -31,6 +31,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -277,6 +279,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 747acf3081..e190e7ad7b 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -37,6 +37,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -516,6 +518,7 @@ 
options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py index edc8a6068e..159fb77d4b 100644 --- a/plugins/modules/keycloak_realm_key.py +++ b/plugins/modules/keycloak_realm_key.py @@ -43,6 +43,8 @@ attributes: support: full diff_mode: support: partial + action_group: + version_added: 10.2.0 options: state: @@ -138,6 +140,7 @@ notes: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py index ef4048b891..9b490fad58 100644 --- a/plugins/modules/keycloak_realm_keys_metadata_info.py +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -23,6 +23,10 @@ description: - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). 
+attributes: + action_group: + version_added: 10.2.0 + options: realm: type: str @@ -32,6 +36,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py index 693cf9894a..0c24b2f949 100644 --- a/plugins/modules/keycloak_realm_rolemapping.py +++ b/plugins/modules/keycloak_realm_rolemapping.py @@ -40,6 +40,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -118,6 +120,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py index f3e01483f8..3978260189 100644 --- a/plugins/modules/keycloak_role.py +++ b/plugins/modules/keycloak_role.py @@ -35,6 +35,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -116,6 +118,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py index 1aeff0da5f..cb63707402 100644 --- a/plugins/modules/keycloak_user.py +++ b/plugins/modules/keycloak_user.py @@ -212,12 +212,15 @@ options: default: false extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes attributes: check_mode: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 notes: - The module does not modify the user ID of an existing user. 
author: diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 215aa7f4ca..4533fa800d 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -31,6 +31,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -513,6 +515,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py index 59727a346e..40e3b38d5c 100644 --- a/plugins/modules/keycloak_user_rolemapping.py +++ b/plugins/modules/keycloak_user_rolemapping.py @@ -37,6 +37,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -110,6 +112,7 @@ options: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py index 57e1c42e96..ba9cae8554 100644 --- a/plugins/modules/keycloak_userprofile.py +++ b/plugins/modules/keycloak_userprofile.py @@ -28,6 +28,8 @@ attributes: support: full diff_mode: support: full + action_group: + version_added: 10.2.0 options: state: @@ -270,6 +272,7 @@ notes: extends_documentation_fragment: - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak - community.general.attributes author: From 005c8f50db934624b6884d0dd12d47225a8b1301 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 24 Dec 2024 06:56:37 +1300 Subject: [PATCH 393/482] proxmox_backup: refactor permission checking (#9239) * proxmox_backup: refactor permission checking * add changelog frag * Update plugins/modules/proxmox_backup.py * Update 
plugins/modules/proxmox_backup.py * Update plugins/modules/proxmox_backup.py * Update plugins/modules/proxmox_backup.py For consistency * Update plugins/modules/proxmox_backup.py * yet another missing slash --- .../9239-proxmox-backup-refactor.yml | 2 + plugins/modules/proxmox_backup.py | 173 ++++++------------ 2 files changed, 61 insertions(+), 114 deletions(-) create mode 100644 changelogs/fragments/9239-proxmox-backup-refactor.yml diff --git a/changelogs/fragments/9239-proxmox-backup-refactor.yml b/changelogs/fragments/9239-proxmox-backup-refactor.yml new file mode 100644 index 0000000000..4f73fe6dde --- /dev/null +++ b/changelogs/fragments/9239-proxmox-backup-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_backup - refactor permission checking to improve code readability and maintainability (https://github.com/ansible-collections/community.general/pull/9239). diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py index 575d492bf9..0db2c4ad0e 100644 --- a/plugins/modules/proxmox_backup.py +++ b/plugins/modules/proxmox_backup.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -230,13 +231,16 @@ backups: type: str ''' -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.basic import AnsibleModule - import time +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.proxmox import ProxmoxAnsible, proxmox_auth_argument_spec + + +def has_permission(permission_tree, permission, search_scopes, default=0, expected=1): + return any(permission_tree.get(scope, {}).get(permission, default) == expected for scope in search_scopes) + class 
ProxmoxBackupAnsible(ProxmoxAnsible): @@ -264,21 +268,20 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): for node in node_endpoints: upid = self._post_vzdump(node, request_body) if upid != "OK": - tasklog = ", ".join( - [logentry["t"] for logentry in self._get_tasklog(node, upid)]) + tasklog = ", ".join(logentry["t"] for logentry in self._get_tasklog(node, upid)) else: tasklog = "" - task_ids.extend( - [{"node": node, "upid": upid, "status": "unknown", "log": "%s" % tasklog}]) + task_ids.extend([{"node": node, "upid": upid, "status": "unknown", "log": "%s" % tasklog}]) return task_ids def check_relevant_nodes(self, node): - nodes = [item["node"] for item in self._get_resources( - "node") if item["status"] == "online"] + nodes = [ + item["node"] + for item in self._get_resources("node") + if item["status"] == "online" + ] if node and node not in nodes: - self.module.fail_json( - msg="Node %s was specified, but does not exist on the cluster" % - node) + self.module.fail_json(msg="Node %s was specified, but does not exist on the cluster" % node) elif node: return [node] return nodes @@ -291,57 +294,28 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): performance, retention): # Check for Datastore.AllocateSpace in the permission tree - if "/" in permissions.keys() and permissions["/"].get( - "Datastore.AllocateSpace", 0) == 1: - pass - elif "/storage" in permissions.keys() and permissions["/storage"].get("Datastore.AllocateSpace", 0) == 1: - pass - elif "/storage/" + storage in permissions.keys() and permissions["/storage/" + storage].get("Datastore.AllocateSpace", 0) == 1: - pass - else: - self.module.fail_json( - changed=False, - msg="Insufficient permission: Datastore.AllocateSpace is missing") - if (bandwidth or performance) and permissions["/"].get( - "Sys.Modify", 0) == 0: - self.module.fail_json( - changed=False, - msg="Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'") + if not has_permission(permissions, 
"Datastore.AllocateSpace", search_scopes=["/", "/storage/", "/storage/" + storage]): + self.module.fail_json(changed=False, msg="Insufficient permission: Datastore.AllocateSpace is missing") + + if (bandwidth or performance) and has_permission(permissions, "Sys.Modify", search_scopes=["/"], expected=0): + self.module.fail_json(changed=False, msg="Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'") + if retention: - if "/" in permissions.keys() and permissions["/"].get( - "Datastore.Allocate", 0) == 1: - pass - elif "/storage" in permissions.keys() and permissions["/storage"].get("Datastore.Allocate", 0) == 1: - pass - elif "/storage/" + storage in permissions.keys() and permissions["/storage/" + storage].get("Datastore.Allocate", 0) == 1: - pass - else: - self.module.fail_json( - changed=False, - msg="Insufficient permissions: Custom retention was requested, but Datastore.Allocate is missing") + if not has_permission(permissions, "Datastore.Allocate", search_scopes=["/", "/storage", "/storage/" + storage]): + self.module.fail_json(changed=False, msg="Insufficient permissions: Custom retention was requested, but Datastore.Allocate is missing") def check_vmid_backup_permission(self, permissions, vmids, pool): - sufficient_permissions = False - if "/" in permissions.keys() and permissions["/"].get( - "VM.Backup", 0) == 1: - sufficient_permissions = True - elif "/vms" in permissions.keys() and permissions["/vms"].get( - "VM.Backup", 0) == 1: - sufficient_permissions = True - elif pool and "/pool/" + pool in permissions.keys() and permissions["/pool/" + pool].get( - "VM.Backup", 0) == 1: - sufficient_permissions = True - elif pool and "/pool/" + pool + "/vms" in permissions.keys() and permissions["/pool/" + pool + "/vms"].get( - "VM.Backup", 0) == 1: - sufficient_permissions = True + sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"]) + if pool and not sufficient_permissions: + 
sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/pool/" + pool, "/pool/" + pool + "/vms"]) if not sufficient_permissions: # Since VM.Backup can be given for each vmid at a time, iterate through all of them # and check, if the permission is set failed_vmids = [] for vm in vmids: - if "/vms/" + \ - str(vm) in permissions.keys() and permissions["/vms/" + str(vm)].get("VM.Backup", 1) == 0: + vm_path = "/vms/" + str(vm) + if has_permission(permissions, "VM.Backup", search_scopes=[vm_path], default=1, expected=0): failed_vmids.append(str(vm)) if failed_vmids: self.module.fail_json( @@ -351,23 +325,11 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): sufficient_permissions = True # Finally, when no check succeeded, fail if not sufficient_permissions: - self.module.fail_json( - changed=False, - msg="Insufficient permissions: You do not have the VM.Backup permission") + self.module.fail_json(changed=False, msg="Insufficient permissions: You do not have the VM.Backup permission") def check_general_backup_permission(self, permissions, pool): - if "/" in permissions.keys() and permissions["/"].get( - "VM.Backup", 0) == 1: - pass - elif "/vms" in permissions.keys() and permissions["/vms"].get("VM.Backup", 0) == 1: - pass - elif pool and "/pool/" + pool in permissions.keys() and permissions["/pool/" + pool].get( - "VM.Backup", 0) == 1: - pass - else: - self.module.fail_json( - changed=False, - msg="Insufficient permissions: You dont have the VM.Backup permission") + if not has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"] + (["/pool/" + pool] if pool else [])): + self.module.fail_json(changed=False, msg="Insufficient permissions: You dont have the VM.Backup permission") def check_if_storage_exists(self, storage, node): storages = self.get_storages(type=None) @@ -413,21 +375,19 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): status = self._get_taskok(node["node"], node["upid"]) if status["status"] == "stopped" and 
status["exitstatus"] == "OK": node["status"] = "success" - if status["status"] == "stopped" and status["exitstatus"] in ( - "job errors",): + if status["status"] == "stopped" and status["exitstatus"] == "job errors": node["status"] = "failed" except Exception as e: - self.module.fail_json( - msg="Unable to retrieve API task ID from node %s: %s" % - (node["node"], e)) - if len([item for item in tasks if item["status"] - != "unknown"]) == len(tasks): + self.module.fail_json(msg="Unable to retrieve API task ID from node %s: %s" % (node["node"], e)) + if len([item for item in tasks if item["status"] != "unknown"]) == len(tasks): break if time.time() > start_time + timeout: - timeouted_nodes = [node["node"] - for node in tasks if node["status"] == "unknown"] - failed_nodes = [node["node"] - for node in tasks if node["status"] == "failed"] + timeouted_nodes = [ + node["node"] + for node in tasks + if node["status"] == "unknown" + ] + failed_nodes = [node["node"] for node in tasks if node["status"] == "failed"] if failed_nodes: self.module.fail_json( msg="Reached timeout while waiting for backup task. 
" @@ -443,8 +403,7 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): error_logs = [] for node in tasks: if node["status"] == "failed": - tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog( - node["node"], node["upid"])]) + tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])]) error_logs.append("%s: %s" % (node, tasklog)) if error_logs: self.module.fail_json( @@ -453,9 +412,8 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): ", ".join(error_logs)) for node in tasks: - tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog( - node["node"], node["upid"])]) - node["log"] = "%s" % tasklog + tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])]) + node["log"] = tasklog # Finally, reattach ok tasks to show, that all nodes were contacted tasks.extend(ok_tasks) @@ -516,8 +474,7 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): # Create comma separated list from vmids, the API expects so if request_body.get("vmid"): - request_body.update( - {"vmid": ",".join([str(vmid) for vmid in request_body.get("vmid")])}) + request_body.update({"vmid": ",".join(str(vmid) for vmid in request_body["vmid"])}) # remove whitespaces from option strings for key in ("prune-backups", "performance"): @@ -550,26 +507,16 @@ class ProxmoxBackupAnsible(ProxmoxAnsible): def main(): module_args = proxmox_auth_argument_spec() backup_args = { - "backup_mode": {"type": "str", "default": "snapshot", "choices": [ - "snapshot", "suspend", "stop" - ]}, + "backup_mode": {"type": "str", "default": "snapshot", "choices": ["snapshot", "suspend", "stop"]}, "bandwidth": {"type": "int"}, - "change_detection_mode": {"type": "str", "choices": [ - "legacy", "data", "metadata" - ]}, - "compress": {"type": "str", "choices": [ - "0", "1", "gzip", "lzo", "zstd" - ]}, + "change_detection_mode": {"type": "str", "choices": ["legacy", "data", "metadata"]}, + "compress": {"type": "str", "choices": ["0", "1", "gzip", 
"lzo", "zstd"]}, "compression_threads": {"type": "int"}, "description": {"type": "str", "default": "{{guestname}}"}, "fleecing": {"type": "str"}, - "mode": {"type": "str", "required": True, "choices": [ - "include", "all", "pool" - ]}, + "mode": {"type": "str", "required": True, "choices": ["include", "all", "pool"]}, "node": {"type": "str"}, - "notification_mode": {"type": "str", "default": "auto", "choices": [ - "auto", "legacy-sendmail", "notification-system" - ]}, + "notification_mode": {"type": "str", "default": "auto", "choices": ["auto", "legacy-sendmail", "notification-system"]}, "performance_tweaks": {"type": "str"}, "pool": {"type": "str"}, "protected": {"type": "bool"}, @@ -611,21 +558,19 @@ def main(): proxmox.check_vmids(module.params["vmids"]) node_endpoints = proxmox.check_relevant_nodes(module.params["node"]) try: - result = proxmox.backup_create( - module.params, module.check_mode, node_endpoints) + result = proxmox.backup_create(module.params, module.check_mode, node_endpoints) except Exception as e: - module.fail_json( - msg="Creating backups failed with exception: %s" % to_native(e)) + module.fail_json(msg="Creating backups failed with exception: %s" % to_native(e)) + if module.check_mode: - module.exit_json(backups=result, changed=True, - msg="Backups would be created") + module.exit_json(backups=result, changed=True, msg="Backups would be created") + elif len([entry for entry in result if entry["upid"] == "OK"]) == len(result): - module.exit_json( - backups=result, - changed=False, - msg="Backup request sent to proxmox, no tasks created") + module.exit_json(backups=result, changed=False, msg="Backup request sent to proxmox, no tasks created") + elif module.params["wait"]: module.exit_json(backups=result, changed=True, msg="Backups succeeded") + else: module.exit_json(backups=result, changed=True, msg="Backup tasks created") From f9bfe4e4a6838c7a9a7aedde72be32611fcff7f6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Tue, 24 Dec 2024 06:58:02 +1300 Subject: [PATCH 394/482] x*: adjust docs (#9308) * adjust docs * Update plugins/modules/xml.py Co-authored-by: Felix Fontein * fix capitalisation * add markup to references of the xe command (xenserver) * add missing markup * Update plugins/modules/xml.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/xattr.py | 33 +- plugins/modules/xbps.py | 158 +++-- plugins/modules/xcc_redfish_command.py | 325 +++++----- plugins/modules/xenserver_facts.py | 11 +- plugins/modules/xenserver_guest.py | 585 +++++++++--------- plugins/modules/xenserver_guest_info.py | 62 +- plugins/modules/xenserver_guest_powerstate.py | 79 ++- plugins/modules/xfconf.py | 97 ++- plugins/modules/xfconf_info.py | 91 ++- plugins/modules/xfs_quota.py | 52 +- plugins/modules/xml.py | 197 +++--- 11 files changed, 827 insertions(+), 863 deletions(-) diff --git a/plugins/modules/xattr.py b/plugins/modules/xattr.py index 7a5f3b431f..11b036ff66 100644 --- a/plugins/modules/xattr.py +++ b/plugins/modules/xattr.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: xattr short_description: Manage user defined extended attributes description: - Manages filesystem user defined extended attributes. - - Requires that extended attributes are enabled on the target filesystem - and that the setfattr/getfattr utilities are present. + - Requires that extended attributes are enabled on the target filesystem and that the C(setfattr)/C(getfattr) utilities are present. extends_documentation_fragment: - community.general.attributes attributes: @@ -29,7 +27,7 @@ options: - The full path of the file/object to get the facts of. type: path required: true - aliases: [ name ] + aliases: [name] namespace: description: - Namespace of the named name/key. 
@@ -45,27 +43,26 @@ options: type: str state: description: - - defines which state you want to do. - V(read) retrieves the current value for a O(key) (default) - V(present) sets O(path) to O(value), default if value is set - V(all) dumps all data - V(keys) retrieves all keys - V(absent) deletes the key + - Defines which state you want to do. + - V(read) retrieves the current value for a O(key). + - V(present) sets O(path) to O(value), default if value is set. + - V(all) dumps all data. + - V(keys) retrieves all keys. + - V(absent) deletes the key. type: str - choices: [ absent, all, keys, present, read ] + choices: [absent, all, keys, present, read] default: read follow: description: - - If V(true), dereferences symlinks and sets/gets attributes on symlink target, - otherwise acts on symlink itself. + - If V(true), dereferences symlinks and sets/gets attributes on symlink target, otherwise acts on symlink itself. type: bool default: true author: - Brian Coca (@bcoca) -''' +""" -EXAMPLES = ''' -- name: Obtain the extended attributes of /etc/foo.conf +EXAMPLES = r""" +- name: Obtain the extended attributes of /etc/foo.conf community.general.xattr: path: /etc/foo.conf @@ -94,7 +91,7 @@ EXAMPLES = ''' namespace: trusted key: glusterfs.volume-id state: absent -''' +""" import os diff --git a/plugins/modules/xbps.py b/plugins/modules/xbps.py index cd34029eba..9f6cb59d98 100644 --- a/plugins/modules/xbps.py +++ b/plugins/modules/xbps.py @@ -10,86 +10,78 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: xbps short_description: Manage packages with XBPS description: - - Manage packages with the XBPS package manager. + - Manage packages with the XBPS package manager. 
author: - - "Dino Occhialini (@dinoocch)" - - "Michael Aldridge (@the-maldridge)" + - "Dino Occhialini (@dinoocch)" + - "Michael Aldridge (@the-maldridge)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the package to install, upgrade, or remove. - aliases: [pkg,package] - type: list - elements: str - state: - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent", "latest", "installed", "removed"] - type: str - recurse: - description: - - When removing a package, also remove its dependencies, provided - that they are not required by other packages and were not - explicitly installed by a user. - type: bool - default: false - update_cache: - description: - - Whether or not to refresh the master package lists. This can be - run as part of a package installation or as a separate step. - type: bool - default: true - upgrade: - description: - - Whether or not to upgrade whole system - type: bool - default: false - upgrade_xbps: - description: - - Whether or not to upgrade the xbps package when necessary. - Before installing new packages, - xbps requires the user to update the xbps package itself. - Thus when this option is set to V(false), - upgrades and installations will fail when xbps is not up to date. - type: bool - default: true - version_added: '0.2.0' - root: - description: - - The full path for the target root directory. - type: path - version_added: '10.2.0' - repositories: - description: - - Repository URL(s) to prepend to the repository list for the - package installation. - The URL can be a URL to a repository for - remote repositories or a path for local repositories. 
- type: list - elements: str - version_added: '10.2.0' - accept_pubkey: - description: - - Whether or not repository signing keys should be automatically accepted. - type: bool - default: false - version_added: '10.2.0' -''' + name: + description: + - Name of the package to install, upgrade, or remove. + aliases: [pkg, package] + type: list + elements: str + state: + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent", "latest", "installed", "removed"] + type: str + recurse: + description: + - When removing a package, also remove its dependencies, provided that they are not required by other packages and were not explicitly installed + by a user. + type: bool + default: false + update_cache: + description: + - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate step. + type: bool + default: true + upgrade: + description: + - Whether or not to upgrade whole system. + type: bool + default: false + upgrade_xbps: + description: + - Whether or not to upgrade the xbps package when necessary. Before installing new packages, xbps requires the user to update the xbps package + itself. Thus when this option is set to V(false), upgrades and installations will fail when xbps is not up to date. + type: bool + default: true + version_added: '0.2.0' + root: + description: + - The full path for the target root directory. + type: path + version_added: '10.2.0' + repositories: + description: + - Repository URL(s) to prepend to the repository list for the package installation. The URL can be a URL to a repository for remote repositories + or a path for local repositories. + type: list + elements: str + version_added: '10.2.0' + accept_pubkey: + description: + - Whether or not repository signing keys should be automatically accepted. 
+ type: bool + default: false + version_added: '10.2.0' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo (automatically updating the xbps package if needed) community.general.xbps: name: foo @@ -151,20 +143,20 @@ EXAMPLES = ''' state: present repositories: https://repo-default.voidlinux.org/current root: /mnt -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Message about results - returned: success - type: str - sample: "System Upgraded" + description: Message about results. + returned: success + type: str + sample: "System Upgraded" packages: - description: Packages that are affected/would be affected - type: list - sample: ["ansible"] - returned: success -''' + description: Packages that are affected/would be affected. + type: list + sample: ["ansible"] + returned: success +""" import os diff --git a/plugins/modules/xcc_redfish_command.py b/plugins/modules/xcc_redfish_command.py index 1e77d0f8db..a5b2ff57c2 100644 --- a/plugins/modules/xcc_redfish_command.py +++ b/plugins/modules/xcc_redfish_command.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: xcc_redfish_command short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs version_added: 2.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action or get information back or update a configuration attribute. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action or get information back or update a configuration + attribute. - Manages virtual media. - Supports getting information back via GET method. - Supports updating a configuration attribute via PATCH method. @@ -54,7 +53,7 @@ options: type: str auth_token: description: - - Security token for authentication with OOB controller + - Security token for authentication with OOB controller. 
type: str timeout: description: @@ -120,181 +119,181 @@ options: type: dict author: "Yuyan Pan (@panyy3)" -''' +""" -EXAMPLES = ''' - - name: Insert Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - media_types: - - CD - - DVD - resource_id: "1" +EXAMPLES = r""" +- name: Insert Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + media_types: + - CD + - DVD + resource_id: "1" - - name: Eject Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - resource_id: "1" +- name: Eject Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + resource_id: "1" - - name: Eject all Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_id: "1" +- name: Eject all Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_id: "1" - - name: Get ComputeSystem Oem property SystemStatus via GetResource command - 
community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - register: result - - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" +- name: Get ComputeSystem Oem property SystemStatus via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1" + register: result +- ansible.builtin.debug: + msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" - - name: Get Oem DNS setting via GetResource command - community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" - register: result +- name: Get Oem DNS setting via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.data }}" - - name: Get Lenovo FoD key collection resource via GetCollectionResource command - community.general.xcc_redfish_command: - category: Raw - command: GetCollectionResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys" - register: result +- name: Get Lenovo FoD key collection resource via GetCollectionResource command + community.general.xcc_redfish_command: + 
category: Raw + command: GetCollectionResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data_list }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.data_list }}" - - name: Update ComputeSystem property AssetTag via PatchResource command - community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - request_body: - AssetTag: "new_asset_tag" +- name: Update ComputeSystem property AssetTag via PatchResource command + community.general.xcc_redfish_command: + category: Raw + command: PatchResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1" + request_body: + AssetTag: "new_asset_tag" - - name: Perform BootToBIOSSetup action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup" - request_body: {} +- name: Perform BootToBIOSSetup action via PostResource command + community.general.xcc_redfish_command: + category: Raw + command: PostResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup" + request_body: {} - - name: Perform SecureBoot.ResetKeys action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - 
resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys" - request_body: - ResetKeysType: DeleteAllKeys +- name: Perform SecureBoot.ResetKeys action via PostResource command + community.general.xcc_redfish_command: + category: Raw + command: PostResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys" + request_body: + ResetKeysType: DeleteAllKeys - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Create session + community.general.redfish_command: + category: Sessions + command: CreateSession + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Update Manager DateTimeLocalOffset property using security token for auth - community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - resource_uri: "/redfish/v1/Managers/1" - request_body: - DateTimeLocalOffset: "+08:00" +- name: Update Manager DateTimeLocalOffset property using security token for auth + community.general.xcc_redfish_command: + category: Raw + command: PatchResource + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + resource_uri: "/redfish/v1/Managers/1" + request_body: + DateTimeLocalOffset: "+08:00" - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" -''' +- name: Delete session using security token created by CreateSesssion above + community.general.redfish_command: + category: Sessions + command: DeleteSession + 
baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + session_uri: "{{ result.session.uri }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: A message related to the performed action(s). - returned: when failure or action/update success - type: str - sample: "Action was successful" + description: A message related to the performed action(s). + returned: when failure or action/update success + type: str + sample: "Action was successful" redfish_facts: - description: Resource content. - returned: when command == GetResource or command == GetCollectionResource - type: dict - sample: '{ - "redfish_facts": { - "data": { - "@odata.etag": "\"3179bf00d69f25a8b3c\"", - "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", - "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", - "DDNS": [ - { - "DDNSEnable": true, - "DomainName": "", - "DomainNameSource": "DHCP" - } - ], - "DNSEnable": true, - "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", - "IPv4Address1": "10.103.62.178", - "IPv4Address2": "0.0.0.0", - "IPv4Address3": "0.0.0.0", - "IPv6Address1": "::", - "IPv6Address2": "::", - "IPv6Address3": "::", - "Id": "LenovoDNS", - "PreferredAddresstype": "IPv4" - }, - "ret": true - } - }' -''' + description: Resource content. 
+ returned: when command == GetResource or command == GetCollectionResource + type: dict + sample: '{ + "redfish_facts": { + "data": { + "@odata.etag": "\"3179bf00d69f25a8b3c\"", + "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", + "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", + "DDNS": [ + { + "DDNSEnable": true, + "DomainName": "", + "DomainNameSource": "DHCP" + } + ], + "DNSEnable": true, + "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", + "IPv4Address1": "10.103.62.178", + "IPv4Address2": "0.0.0.0", + "IPv4Address3": "0.0.0.0", + "IPv6Address1": "::", + "IPv6Address2": "::", + "IPv6Address3": "::", + "Id": "LenovoDNS", + "PreferredAddresstype": "IPv4" + }, + "ret": true + } + }' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/xenserver_facts.py b/plugins/modules/xenserver_facts.py index 685522f499..a3840e0e57 100644 --- a/plugins/modules/xenserver_facts.py +++ b/plugins/modules/xenserver_facts.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: xenserver_facts short_description: Get facts reported on xenserver description: - - Reads data out of XenAPI, can be used instead of multiple xe commands. + - Reads data out of XenAPI, can be used instead of multiple C(xe) commands. 
author: - Andy Hill (@andyhky) - Tim Rupp (@caphrim007) @@ -28,9 +27,9 @@ attributes: version_added: 3.3.0 # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather facts from xenserver community.general.xenserver_facts: @@ -48,7 +47,7 @@ EXAMPLES = ''' # "item": "Control domain on host: 10.0.13.22", # "msg": "Control domain on host: 10.0.13.22" # } -''' +""" HAVE_XENAPI = False diff --git a/plugins/modules/xenserver_guest.py b/plugins/modules/xenserver_guest.py index 110bc88751..16d928874c 100644 --- a/plugins/modules/xenserver_guest.py +++ b/plugins/modules/xenserver_guest.py @@ -8,43 +8,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: xenserver_guest short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to create new virtual machines from templates or other virtual machines, - modify various virtual machine components like network and disk, rename a virtual machine and - remove a virtual machine with associated components. +description: >- + This module can be used to create new virtual machines from templates or other virtual machines, modify various virtual machine components like + network and disk, rename a virtual machine and remove a virtual machine with associated components. author: -- Bojan Vitnik (@bvitnik) + - Bojan Vitnik (@bvitnik) notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). 
Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in O(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for O(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=false) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- 'Network configuration inside a guest OS, by using O(networks[].type), O(networks[].ip), O(networks[].gateway) etc. parameters, is supported on - XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to - detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest - agent only support None and Static types of network configuration, where None means DHCP configured interface, O(networks[].type) and O(networks[].type6) - values V(none) and V(dhcp) have same effect. More info here: - U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)' -- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore - C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through - WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. 
It is up to the user - to implement a boot time scripts or custom agent that will read the parameters from xenstore and configure network with given parameters. - Take note that for xenstore data to become available inside a guest, a VM restart is needed hence module will require VM restart if any - parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most - useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here: - U(https://support.citrix.com/article/CTX226713)' + - Minimal supported version of XenServer is 5.6. + - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix + Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible + Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' + - 'If no scheme is specified in O(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you + are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' + - 'To use C(https://) scheme for O(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=false) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - 'Network configuration inside a guest OS, by using O(networks[].type), O(networks[].ip), O(networks[].gateway) etc. parameters, is supported + on XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. 
The module will try + to detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest + agent only support None and Static types of network configuration, where None means DHCP configured interface, O(networks[].type) and O(networks[].type6) + values V(none) and V(dhcp) have same effect. More info here: + U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html).' + - 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore + C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through WMI + interface on Windows guests. + They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user to implement a boot time + scripts or custom agent that will read the parameters from xenstore and configure network with given parameters. Take note that for xenstore + data to become available inside a guest, a VM restart is needed hence module will require VM restart if any parameter is changed. This is + a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most useful for bootstrapping + newly deployed VMs, much less for reconfiguring existing ones. More info here: U(https://support.citrix.com/article/CTX226713).' requirements: -- XenAPI + - XenAPI attributes: check_mode: support: full @@ -53,248 +51,249 @@ attributes: options: state: description: - - Specify the state VM should be in. - - If O(state) is set to V(present) and VM exists, ensure the VM configuration conforms to given parameters. - - If O(state) is set to V(present) and VM does not exist, then VM is deployed with given parameters. 
- - If O(state) is set to V(absent) and VM exists, then VM is removed with its associated components. - - If O(state) is set to V(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. + - Specify the state VM should be in. + - If O(state) is set to V(present) and VM exists, ensure the VM configuration conforms to given parameters. + - If O(state) is set to V(present) and VM does not exist, then VM is deployed with given parameters. + - If O(state) is set to V(absent) and VM exists, then VM is removed with its associated components. + - If O(state) is set to V(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. type: str default: present - choices: [ present, absent, poweredon ] + choices: [present, absent, poweredon] name: description: - - Name of the VM to work with. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. + - Name of the VM to work with. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. type: str - aliases: [ name_label ] + aliases: [name_label] name_desc: description: - - VM description. + - VM description. type: str uuid: description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally. + - UUID of the VM to manage if known. This is XenServer's unique identifier. + - It is required if name is not unique. 
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally. type: str template: description: - - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. - - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found. - - In case of multiple templates/VMs/snapshots with same name, use O(template_uuid) to uniquely specify source template. - - If VM already exists, this setting will be ignored. - - This parameter is case sensitive. + - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. + - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are + found. + - In case of multiple templates/VMs/snapshots with same name, use O(template_uuid) to uniquely specify source template. + - If VM already exists, this setting will be ignored. + - This parameter is case sensitive. type: str - aliases: [ template_src ] + aliases: [template_src] template_uuid: description: - - UUID of a template, an existing VM or a snapshot that should be used to create VM. - - It is required if template name is not unique. + - UUID of a template, an existing VM or a snapshot that should be used to create VM. + - It is required if template name is not unique. type: str is_template: description: - - Convert VM to template. + - Convert VM to template. type: bool default: false folder: description: - - Destination folder for VM. - - This parameter is case sensitive. - - 'Example:' - - ' folder: /folder1/folder2' + - Destination folder for VM. + - This parameter is case sensitive. + - 'Example:' + - ' folder: /folder1/folder2' type: str hardware: description: - - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. + - Manage VM's hardware parameters. 
VM needs to be shut down to reconfigure these parameters. type: dict suboptions: num_cpus: description: - - Number of CPUs. + - Number of CPUs. type: int num_cpu_cores_per_socket: description: - - Number of Cores Per Socket. O(hardware.num_cpus) has to be a multiple of O(hardware.num_cpu_cores_per_socket). + - Number of Cores Per Socket. O(hardware.num_cpus) has to be a multiple of O(hardware.num_cpu_cores_per_socket). type: int memory_mb: description: - - Amount of memory in MB. + - Amount of memory in MB. type: int disks: description: - - A list of disks to add to VM. - - All parameters are case sensitive. - - Removing or detaching existing disks of VM is not supported. - - New disks are required to have either a O(disks[].size) or one of O(ignore:disks[].size_[tb,gb,mb,kb,b]) parameters specified. - - VM needs to be shut down to reconfigure disk size. + - A list of disks to add to VM. + - All parameters are case sensitive. + - Removing or detaching existing disks of VM is not supported. + - New disks are required to have either a O(disks[].size) or one of O(ignore:disks[].size_[tb,gb,mb,kb,b]) parameters specified. + - VM needs to be shut down to reconfigure disk size. type: list elements: dict - aliases: [ disk ] + aliases: [disk] suboptions: size: description: - - 'Disk size with unit. Unit must be: V(b), V(kb), V(mb), V(gb), V(tb). VM needs to be shut down to reconfigure this parameter.' - - If no unit is specified, size is assumed to be in bytes. + - 'Disk size with unit. Unit must be: V(b), V(kb), V(mb), V(gb), V(tb). VM needs to be shut down to reconfigure this parameter.' + - If no unit is specified, size is assumed to be in bytes. type: str size_b: description: - - Disk size in bytes. + - Disk size in bytes. type: str size_kb: description: - - Disk size in kilobytes. + - Disk size in kilobytes. type: str size_mb: description: - - Disk size in megabytes. + - Disk size in megabytes. type: str size_gb: description: - - Disk size in gigabytes. 
+ - Disk size in gigabytes. type: str size_tb: description: - - Disk size in terabytes. + - Disk size in terabytes. type: str name: description: - - Disk name. + - Disk name. type: str - aliases: [ name_label ] + aliases: [name_label] name_desc: description: - - Disk description. + - Disk description. type: str sr: description: - - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. + - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. type: str sr_uuid: description: - - UUID of a SR to create disk on. Use if SR name is not unique. + - UUID of a SR to create disk on. Use if SR name is not unique. type: str cdrom: description: - - A CD-ROM configuration for the VM. - - All parameters are case sensitive. + - A CD-ROM configuration for the VM. + - All parameters are case sensitive. type: dict suboptions: type: description: - - The type of CD-ROM. With V(none) the CD-ROM device will be present but empty. + - The type of CD-ROM. With V(none) the CD-ROM device will be present but empty. type: str - choices: [ none, iso ] + choices: [none, iso] iso_name: description: - - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies O(cdrom.type=iso)).' - - Required if O(cdrom.type) is set to V(iso). + - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies O(cdrom.type=iso)).' + - Required if O(cdrom.type) is set to V(iso). type: str networks: description: - - A list of networks (in the order of the NICs). - - All parameters are case sensitive. - - Name is required for new NICs. Other parameters are optional in all cases. + - A list of networks (in the order of the NICs). + - All parameters are case sensitive. + - Name is required for new NICs. Other parameters are optional in all cases. 
type: list elements: dict - aliases: [ network ] + aliases: [network] suboptions: - name: - description: + name: + description: - Name of a XenServer network to attach the network interface to. - type: str - aliases: [ name_label ] - mac: - description: + type: str + aliases: [name_label] + mac: + description: - Customize MAC address of the interface. - type: str - type: - description: - - Type of IPv4 assignment. Value V(none) means whatever is default for OS. - - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). - type: str - choices: [ none, dhcp, static ] - ip: - description: - - 'Static IPv4 address (implies O(networks[].type=static)). Can include prefix in format C(/) instead of using C(netmask).' - type: str - netmask: - description: + type: str + type: + description: + - Type of IPv4 assignment. Value V(none) means whatever is default for OS. + - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). + type: str + choices: [none, dhcp, static] + ip: + description: + - Static IPv4 address (implies O(networks[].type=static)). Can include prefix in format C(/) instead of using + C(netmask). + type: str + netmask: + description: - Static IPv4 netmask required for O(networks[].ip) if prefix is not specified. - type: str - gateway: - description: + type: str + gateway: + description: - Static IPv4 gateway. - type: str - type6: - description: + type: str + type6: + description: - Type of IPv6 assignment. Value V(none) means whatever is default for OS. - type: str - choices: [ none, dhcp, static ] - ip6: - description: + type: str + choices: [none, dhcp, static] + ip6: + description: - 'Static IPv6 address (implies O(networks[].type6=static)) with prefix in format C(/).' - type: str - gateway6: - description: + type: str + gateway6: + description: - Static IPv6 gateway. 
- type: str + type: str home_server: description: - - Name of a XenServer host that will be a Home Server for the VM. - - This parameter is case sensitive. + - Name of a XenServer host that will be a Home Server for the VM. + - This parameter is case sensitive. type: str custom_params: description: - - Define a list of custom VM params to set on VM. - - Useful for advanced users familiar with managing VM params through xe CLI. - - A custom value object takes two fields O(custom_params[].key) and O(custom_params[].value) (see example below). + - Define a list of custom VM params to set on VM. + - Useful for advanced users familiar with managing VM params through C(xe) CLI. + - A custom value object takes two fields O(custom_params[].key) and O(custom_params[].value) (see example below). type: list elements: dict suboptions: key: description: - - VM param name. + - VM param name. type: str required: true value: description: - - VM param value. + - VM param value. type: raw required: true wait_for_ip_address: description: - - Wait until XenServer detects an IP address for the VM. If O(state) is set to V(absent), this parameter is ignored. - - This requires XenServer Tools to be preinstalled on the VM to work properly. + - Wait until XenServer detects an IP address for the VM. If O(state) is set to V(absent), this parameter is ignored. + - This requires XenServer Tools to be preinstalled on the VM to work properly. type: bool default: false state_change_timeout: description: - - 'By default, module will wait indefinitely for VM to acquire an IP address if O(wait_for_ip_address=true).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. + - 'By default, module will wait indefinitely for VM to acquire an IP address if O(wait_for_ip_address=true).' 
+ - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. + - In case of timeout, module will generate an error message. type: int default: 0 linked_clone: description: - - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. - - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter. + - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. + - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter. type: bool default: false force: description: - - Ignore warnings and complete the actions. - - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down. + - Ignore warnings and complete the actions. + - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down. 
type: bool default: false extends_documentation_fragment: -- community.general.xenserver.documentation -- community.general.attributes + - community.general.xenserver.documentation + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a VM from a template community.general.xenserver_guest: hostname: "{{ xenserver_hostname }}" @@ -305,8 +304,8 @@ EXAMPLES = r''' state: poweredon template: CentOS 7 disks: - - size_gb: 10 - sr: my_sr + - size_gb: 10 + sr: my_sr hardware: num_cpus: 6 num_cpu_cores_per_socket: 3 @@ -315,8 +314,8 @@ EXAMPLES = r''' type: iso iso_name: guest-tools.iso networks: - - name: VM Network - mac: aa:bb:dd:aa:00:14 + - name: VM Network + mac: aa:bb:dd:aa:00:14 wait_for_ip_address: true delegate_to: localhost register: deploy @@ -330,8 +329,8 @@ EXAMPLES = r''' name: testvm_6 is_template: true disk: - - size_gb: 10 - sr: my_sr + - size_gb: 10 + sr: my_sr hardware: memory_mb: 512 num_cpus: 1 @@ -365,8 +364,8 @@ EXAMPLES = r''' name: testvm_8 state: present custom_params: - - key: HVM_boot_params - value: { "order": "ndc" } + - key: HVM_boot_params + value: {"order": "ndc"} delegate_to: localhost - name: Customize network parameters @@ -376,154 +375,154 @@ EXAMPLES = r''' password: "{{ xenserver_password }}" name: testvm_10 networks: - - name: VM Network - ip: 192.168.1.100/24 - gateway: 192.168.1.1 - - type: dhcp + - name: VM Network + ip: 192.168.1.100/24 + gateway: 192.168.1.1 + - type: dhcp delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 
42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", + description: Metadata about the VM. + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": 
"true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" } + } changes: - description: Detected or made changes to VM - returned: always - type: list - sample: [ + description: Detected or made changes to VM. 
+ returned: always + type: list + sample: [ + { + "hardware": [ + "num_cpus" + ] + }, + { + "disks_changed": [ + [], + [ + "size" + ] + ] + }, + { + "disks_new": [ { - "hardware": [ - "num_cpus" - ] - }, + "name": "new-disk", + "name_desc": "", + "position": 2, + "size_gb": "4", + "vbd_userdevice": "2" + } + ] + }, + { + "cdrom": [ + "type", + "iso_name" + ] + }, + { + "networks_changed": [ + [ + "mac" + ], + ] + }, + { + "networks_new": [ { - "disks_changed": [ - [], - [ - "size" - ] - ] - }, - { - "disks_new": [ - { - "name": "new-disk", - "name_desc": "", - "position": 2, - "size_gb": "4", - "vbd_userdevice": "2" - } - ] - }, - { - "cdrom": [ - "type", - "iso_name" - ] - }, - { - "networks_changed": [ - [ - "mac" - ], - ] - }, - { - "networks_new": [ - { - "name": "Pool-wide network associated with eth2", - "position": 1, - "vif_device": "1" - } - ] - }, - "need_poweredoff" - ] -''' + "name": "Pool-wide network associated with eth2", + "position": 1, + "vif_device": "1" + } + ] + }, + "need_poweredoff" + ] +""" import re diff --git a/plugins/modules/xenserver_guest_info.py b/plugins/modules/xenserver_guest_info.py index 68050f9509..10cd11839c 100644 --- a/plugins/modules/xenserver_guest_info.py +++ b/plugins/modules/xenserver_guest_info.py @@ -8,48 +8,46 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: xenserver_guest_info short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to gather essential VM facts. +description: This module can be used to gather essential VM facts. author: -- Bojan Vitnik (@bvitnik) + - Bojan Vitnik (@bvitnik) notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. 
-- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - Minimal supported version of XenServer is 5.6. + - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix + Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible + Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' + - 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you + are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
+ - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=no) which + requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' requirements: -- XenAPI + - XenAPI options: name: description: - - Name of the VM to gather facts from. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. + - Name of the VM to gather facts from. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. type: str - aliases: [ name_label ] + aliases: [name_label] uuid: description: - - UUID of the VM to gather fact of. This is XenServer's unique identifier. - - It is required if name is not unique. + - UUID of the VM to gather fact of. This is XenServer's unique identifier. + - It is required if name is not unique. type: str extends_documentation_fragment: -- community.general.xenserver.documentation -- community.general.attributes -- community.general.attributes.info_module -''' + - community.general.xenserver.documentation + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts community.general.xenserver_guest_info: hostname: "{{ xenserver_hostname }}" @@ -58,11 +56,11 @@ EXAMPLES = r''' name: testvm_11 delegate_to: localhost register: facts -''' +""" -RETURN = r''' +RETURN = r""" instance: - description: Metadata about the VM + description: Metadata about the VM. 
returned: always type: dict sample: { @@ -147,7 +145,7 @@ instance: "vm-data": "" } } -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/xenserver_guest_powerstate.py b/plugins/modules/xenserver_guest_powerstate.py index c4e4f5976f..86a21b56dc 100644 --- a/plugins/modules/xenserver_guest_powerstate.py +++ b/plugins/modules/xenserver_guest_powerstate.py @@ -8,27 +8,25 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: xenserver_guest_powerstate short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. +description: This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. author: -- Bojan Vitnik (@bvitnik) + - Bojan Vitnik (@bvitnik) notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
-- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - Minimal supported version of XenServer is 5.6. + - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix + Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible + Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' + - 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you + are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' + - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: + no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' requirements: -- XenAPI + - XenAPI attributes: check_mode: support: full @@ -37,45 +35,44 @@ attributes: options: state: description: - - Specify the state VM should be in. - - If O(state) is set to value other than V(present), then VM is transitioned into required state and facts are returned. - - If O(state) is set to V(present), then VM is just checked for existence and facts are returned. + - Specify the state VM should be in. + - If O(state) is set to value other than V(present), then VM is transitioned into required state and facts are returned. 
+ - If O(state) is set to V(present), then VM is just checked for existence and facts are returned. type: str default: present - choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] + choices: [powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present] name: description: - - Name of the VM to manage. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. + - Name of the VM to manage. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. type: str - aliases: [ name_label ] + aliases: [name_label] uuid: description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. + - UUID of the VM to manage if known. This is XenServer's unique identifier. + - It is required if name is not unique. type: str wait_for_ip_address: description: - - Wait until XenServer detects an IP address for the VM. - - This requires XenServer Tools to be preinstalled on the VM to work properly. + - Wait until XenServer detects an IP address for the VM. + - This requires XenServer Tools to be preinstalled on the VM to work properly. type: bool default: false state_change_timeout: description: - - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if O(wait_for_ip_address=true).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. 
+ - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if O(wait_for_ip_address=true).' + - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. + - In case of timeout, module will generate an error message. type: int default: 0 extends_documentation_fragment: -- community.general.xenserver.documentation -- community.general.attributes + - community.general.xenserver.documentation + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Power on VM community.general.xenserver_guest_powerstate: hostname: "{{ xenserver_hostname }}" @@ -85,11 +82,11 @@ EXAMPLES = r''' state: powered-on delegate_to: localhost register: facts -''' +""" -RETURN = r''' +RETURN = r""" instance: - description: Metadata about the VM + description: Metadata about the VM. returned: always type: dict sample: { @@ -174,7 +171,7 @@ instance: "vm-data": "" } } -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 8bb0abc273..b925e624c8 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -8,26 +8,25 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: xfconf author: -- "Joseph Benden (@jbenden)" -- "Alexei Znamensky (@russoz)" + - "Joseph Benden (@jbenden)" + - "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: -- This module allows for the manipulation of Xfce 4 Configuration with the help of C(xfconf-query). + - This module allows for the manipulation of Xfce 4 Configuration with the help of C(xfconf-query). seealso: -- name: xfconf-query(1) man page - description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. 
- link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' + - name: xfconf-query(1) man page + description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. + link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' -- name: xfconf - Configuration Storage System - description: XFCE documentation for the Xfconf configuration system. - link: 'https://docs.xfce.org/xfce/xfconf/start' + - name: xfconf - Configuration Storage System + description: XFCE documentation for the Xfconf configuration system. + link: 'https://docs.xfce.org/xfce/xfconf/start' extends_documentation_fragment: -- community.general.attributes + - community.general.attributes attributes: check_mode: @@ -38,50 +37,49 @@ attributes: options: channel: description: - - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application - properties/keys are stored. See man xfconf-query(1). + - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application + properties/keys are stored. See man xfconf-query(1). required: true type: str property: description: - - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man xfconf-query(1). + - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man xfconf-query(1). required: true type: str value: description: - - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See man xfconf-query(1). + - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See man xfconf-query(1). type: list elements: raw value_type: description: - - The type of value being set. 
- - When providing more than one O(value_type), the length of the list must be equal to the length of O(value). - - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) will be applied to all elements of - O(value). - - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) must be used to - ensure that C(xfconf-query) will interpret the value as an array rather than a scalar. - - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. + - The type of value being set. + - When providing more than one O(value_type), the length of the list must be equal to the length of O(value). + - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) will be applied to all elements + of O(value). + - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) must be used + to ensure that C(xfconf-query) will interpret the value as an array rather than a scalar. + - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. type: list elements: str choices: [string, int, double, bool, uint, uchar, char, uint64, int64, float] state: type: str description: - - The action to take upon the property/value. - - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. + - The action to take upon the property/value. + - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. choices: [present, absent] default: "present" force_array: description: - - Force array even if only one element. + - Force array even if only one element. 
type: bool default: false aliases: ['array'] version_added: 1.0.0 """ -EXAMPLES = """ ---- +EXAMPLES = r""" - name: Change the DPI to "192" xfconf: channel: "xsettings" @@ -105,57 +103,56 @@ EXAMPLES = """ force_array: true """ -RETURN = """ ---- +RETURN = r""" channel: - description: The channel specified in the module parameters + description: The channel specified in the module parameters. returned: success type: str sample: "xsettings" property: - description: The property specified in the module parameters + description: The property specified in the module parameters. returned: success type: str sample: "/Xft/DPI" value_type: description: - - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings for array types. - - This is a string or a list of strings. + - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings for array types. + - This is a string or a list of strings. returned: success type: any sample: '"int" or ["str", "str", "str"]' value: description: - - The value of the preference key after executing the module. Either a single string value or a list of strings for array types. - - This is a string or a list of strings. + - The value of the preference key after executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. returned: success type: any - sample: '"192" or ["orange", "yellow", "violet"]' + sample: "'192' or ['orange', 'yellow', 'violet']" previous_value: description: - - The value of the preference key before executing the module. Either a single string value or a list of strings for array types. - - This is a string or a list of strings. + - The value of the preference key before executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. 
returned: success type: any sample: '"96" or ["red", "blue", "green"]' cmd: description: - - A list with the resulting C(xfconf-query) command executed by the module. + - A list with the resulting C(xfconf-query) command executed by the module. returned: success type: list elements: str version_added: 5.4.0 sample: - - /usr/bin/xfconf-query - - --channel - - xfce4-panel - - --property - - /plugins/plugin-19/timezone - - --create - - --type - - string - - --set - - Pacific/Auckland + - /usr/bin/xfconf-query + - --channel + - xfce4-panel + - --property + - /plugins/plugin-19/timezone + - --create + - --type + - string + - --set + - Pacific/Auckland """ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index aba0d912ff..d8e6acc50d 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -7,18 +7,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: xfconf_info author: -- "Alexei Znamensky (@russoz)" + - "Alexei Znamensky (@russoz)" short_description: Retrieve XFCE4 configurations version_added: 3.5.0 description: -- This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). + - This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). extends_documentation_fragment: -- community.general.attributes -- community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module attributes: check_mode: version_added: 3.3.0 @@ -26,26 +25,21 @@ attributes: options: channel: description: - - > - A Xfconf preference channel is a top-level tree key, inside of the - Xfconf repository that corresponds to the location for which all - application properties/keys are stored. - - If not provided, the module will list all available channels. 
+ - "A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application + properties/keys are stored." + - If not provided, the module will list all available channels. type: str property: description: - - > - A Xfce preference key is an element in the Xfconf repository - that corresponds to an application preference. - - If provided, then O(channel) is required. - - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel). + - "A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference." + - If provided, then O(channel) is required. + - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel). type: str notes: -- See man xfconf-query(1) for more details. + - See man xfconf-query(1) for more details. """ -EXAMPLES = """ ---- +EXAMPLES = r""" - name: Get list of all available channels community.general.xfconf_info: {} register: result @@ -68,63 +62,62 @@ EXAMPLES = """ register: result """ -RETURN = """ ---- +RETURN = r""" channels: description: - - List of available channels. - - Returned when the module receives no parameter at all. + - List of available channels. + - Returned when the module receives no parameter at all. returned: success type: list elements: str sample: - - xfce4-desktop - - displays - - xsettings - - xfwm4 + - xfce4-desktop + - displays + - xsettings + - xfwm4 properties: description: - - List of available properties for a specific channel. - - Returned by passing only the O(channel) parameter to the module. + - List of available properties for a specific channel. + - Returned by passing only the O(channel) parameter to the module. 
returned: success type: list elements: str sample: - - /Gdk/WindowScalingFactor - - /Gtk/ButtonImages - - /Gtk/CursorThemeSize - - /Gtk/DecorationLayout - - /Gtk/FontName - - /Gtk/MenuImages - - /Gtk/MonospaceFontName - - /Net/DoubleClickTime - - /Net/IconThemeName - - /Net/ThemeName - - /Xft/Antialias - - /Xft/Hinting - - /Xft/HintStyle - - /Xft/RGBA + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA is_array: description: - - Flag indicating whether the property is an array or not. + - Flag indicating whether the property is an array or not. returned: success type: bool value: description: - - The value of the property. Empty if the property is of array type. + - The value of the property. Empty if the property is of array type. returned: success type: str sample: Monospace 10 value_array: description: - - The array value of the property. Empty if the property is not of array type. + - The array value of the property. Empty if the property is not of array type. 
returned: success type: list elements: str sample: - - Main - - Work - - Tmp + - Main + - Work + - Tmp """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper diff --git a/plugins/modules/xfs_quota.py b/plugins/modules/xfs_quota.py index 6d05219905..3b0b2bd19e 100644 --- a/plugins/modules/xfs_quota.py +++ b/plugins/modules/xfs_quota.py @@ -12,7 +12,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" ---- module: xfs_quota short_description: Manage quotas on XFS filesystems description: @@ -85,7 +84,7 @@ options: - absent requirements: - - xfsprogs + - xfsprogs """ EXAMPLES = r""" @@ -109,40 +108,39 @@ EXAMPLES = r""" mountpoint: /home isoft: 1024 ihard: 2048 - """ RETURN = r""" bhard: - description: the current bhard setting in bytes - returned: always - type: int - sample: 1024 + description: The current C(bhard) setting in bytes. + returned: always + type: int + sample: 1024 bsoft: - description: the current bsoft setting in bytes - returned: always - type: int - sample: 1024 + description: The current C(bsoft) setting in bytes. + returned: always + type: int + sample: 1024 ihard: - description: the current ihard setting in bytes - returned: always - type: int - sample: 100 + description: The current C(ihard) setting in bytes. + returned: always + type: int + sample: 100 isoft: - description: the current isoft setting in bytes - returned: always - type: int - sample: 100 + description: The current C(isoft) setting in bytes. + returned: always + type: int + sample: 100 rtbhard: - description: the current rtbhard setting in bytes - returned: always - type: int - sample: 1024 + description: The current C(rtbhard) setting in bytes. + returned: always + type: int + sample: 1024 rtbsoft: - description: the current rtbsoft setting in bytes - returned: always - type: int - sample: 1024 + description: The current C(rtbsoft) setting in bytes. 
+ returned: always + type: int + sample: 1024 """ import grp diff --git a/plugins/modules/xml.py b/plugins/modules/xml.py index f5cdbeac38..b06b8051a2 100644 --- a/plugins/modules/xml.py +++ b/plugins/modules/xml.py @@ -11,8 +11,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: xml short_description: Manage bits and pieces of XML files or strings description: @@ -27,96 +26,94 @@ attributes: options: path: description: - - Path to the file to operate on. - - This file must exist ahead of time. - - This parameter is required, unless O(xmlstring) is given. + - Path to the file to operate on. + - This file must exist ahead of time. + - This parameter is required, unless O(xmlstring) is given. type: path - aliases: [ dest, file ] + aliases: [dest, file] xmlstring: description: - - A string containing XML on which to operate. - - This parameter is required, unless O(path) is given. + - A string containing XML on which to operate. + - This parameter is required, unless O(path) is given. type: str xpath: description: - - A valid XPath expression describing the item(s) you want to manipulate. - - Operates on the document root, V(/), by default. + - A valid XPath expression describing the item(s) you want to manipulate. + - Operates on the document root, V(/), by default. type: str namespaces: description: - - The namespace C(prefix:uri) mapping for the XPath expression. - - Needs to be a C(dict), not a C(list) of items. + - The namespace C(prefix:uri) mapping for the XPath expression. + - Needs to be a C(dict), not a C(list) of items. type: dict default: {} state: description: - - Set or remove an xpath selection (node(s), attribute(s)). + - Set or remove an xpath selection (node(s), attribute(s)). 
type: str - choices: [ absent, present ] + choices: [absent, present] default: present - aliases: [ ensure ] + aliases: [ensure] attribute: description: - - The attribute to select when using parameter O(value). - - This is a string, not prepended with V(@). + - The attribute to select when using parameter O(value). + - This is a string, not prepended with V(@). type: raw value: description: - - Desired state of the selected attribute. - - Either a string, or to unset a value, the Python V(None) keyword (YAML Equivalent, V(null)). - - Elements default to no value (but present). - - Attributes default to an empty string. + - Desired state of the selected attribute. + - Either a string, or to unset a value, the Python V(None) keyword (YAML Equivalent, V(null)). + - Elements default to no value (but present). + - Attributes default to an empty string. type: raw add_children: description: - - Add additional child-element(s) to a selected element for a given O(xpath). - - Child elements must be given in a list and each item may be either a string - (for example C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. - - This parameter requires O(xpath) to be set. + - Add additional child-element(s) to a selected element for a given O(xpath). + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() + child element), or a hash where the key is an element name and the value is the element value. + - This parameter requires O(xpath) to be set. type: list elements: raw set_children: description: - - Set the child-element(s) of a selected element for a given O(xpath). - - Removes any existing children. - - Child elements must be specified as in O(add_children). - - This parameter requires O(xpath) to be set. + - Set the child-element(s) of a selected element for a given O(xpath). + - Removes any existing children. 
+ - Child elements must be specified as in O(add_children). + - This parameter requires O(xpath) to be set. type: list elements: raw count: description: - - Search for a given O(xpath) and provide the count of any matches. - - This parameter requires O(xpath) to be set. + - Search for a given O(xpath) and provide the count of any matches. + - This parameter requires O(xpath) to be set. type: bool default: false print_match: description: - - Search for a given O(xpath) and print out any matches. - - This parameter requires O(xpath) to be set. + - Search for a given O(xpath) and print out any matches. + - This parameter requires O(xpath) to be set. type: bool default: false pretty_print: description: - - Pretty print XML output. + - Pretty print XML output. type: bool default: false content: description: - - Search for a given O(xpath) and get content. - - This parameter requires O(xpath) to be set. + - Search for a given O(xpath) and get content. + - This parameter requires O(xpath) to be set. type: str - choices: [ attribute, text ] + choices: [attribute, text] input_type: description: - - Type of input for O(add_children) and O(set_children). + - Type of input for O(add_children) and O(set_children). type: str - choices: [ xml, yaml ] + choices: [xml, yaml] default: yaml backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: false strip_cdata_tags: @@ -128,46 +125,44 @@ options: insertbefore: description: - Add additional child-element(s) before the first selected element for a given O(xpath). 
- - Child elements must be given in a list and each item may be either a string - (for example C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() + child element), or a hash where the key is an element name and the value is the element value. - This parameter requires O(xpath) to be set. type: bool default: false insertafter: description: - Add additional child-element(s) after the last selected element for a given O(xpath). - - Child elements must be given in a list and each item may be either a string - (for example C(children=ansible) to add an empty C() child element), - or a hash where the key is an element name and the value is the element value. + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() + child element), or a hash where the key is an element name and the value is the element value. - This parameter requires O(xpath) to be set. type: bool default: false requirements: -- lxml >= 2.3.0 + - lxml >= 2.3.0 notes: -- Use the C(--check) and C(--diff) options when testing your expressions. -- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. -- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. -- Beware that in case your XML elements are namespaced, you need to use the O(namespaces) parameter, see the examples. -- Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them. + - Use the C(--check) and C(--diff) options when testing your expressions. 
+ - The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. + - This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. + - Beware that in case your XML elements are namespaced, you need to use the O(namespaces) parameter, see the examples. + - Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them. seealso: -- name: Xml module development community wiki - description: More information related to the development of this xml module. - link: https://github.com/ansible/community/wiki/Module:-xml -- name: Introduction to XPath - description: A brief tutorial on XPath (w3schools.com). - link: https://www.w3schools.com/xml/xpath_intro.asp -- name: XPath Reference document - description: The reference documentation on XSLT/XPath (developer.mozilla.org). - link: https://developer.mozilla.org/en-US/docs/Web/XPath + - name: XML module development community wiki (archived) + description: More information related to the development of this xml module. + link: https://github.com/ansible/community/wiki/Module:-xml + - name: Introduction to XPath + description: A brief tutorial on XPath (w3schools.com). + link: https://www.w3schools.com/xml/xpath_intro.asp + - name: XPath Reference document + description: The reference documentation on XSLT/XPath (developer.mozilla.org). 
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath author: -- Tim Bielawa (@tbielawa) -- Magnus Hedemark (@magnus919) -- Dag Wieers (@dagwieers) -''' + - Tim Bielawa (@tbielawa) + - Magnus Hedemark (@magnus919) + - Dag Wieers (@dagwieers) +""" -EXAMPLES = r''' +EXAMPLES = r""" # Consider the following XML file: # # @@ -219,9 +214,9 @@ EXAMPLES = r''' path: /foo/bar.xml xpath: /business/beers add_children: - - beer: Old Rasputin - - beer: Old Motor Oil - - beer: Old Curmudgeon + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon - name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element community.general.xml: @@ -229,9 +224,9 @@ EXAMPLES = r''' xpath: '/business/beers/beer[text()="Rochefort 10"]' insertbefore: true add_children: - - beer: Old Rasputin - - beer: Old Motor Oil - - beer: Old Curmudgeon + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon # NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements - name: Add a 'validxhtml' element to the 'website' element @@ -301,14 +296,14 @@ EXAMPLES = r''' xpath: /business add_children: - building: - # Attributes + # Attributes name: Scumm bar location: Monkey island - # Subnodes + # Subnodes _: - floor: Pirate hall - floor: Grog storage - - construction_date: "1990" # Only strings are valid + - construction_date: "1990" # Only strings are valid - building: Grog factory # Consider this XML for following example - @@ -327,37 +322,37 @@ EXAMPLES = r''' path: bar.xml xpath: /config/element[@name='test1'] state: absent -''' +""" -RETURN = r''' +RETURN = r""" actions: - description: A dictionary with the original xpath, namespaces and state. - type: dict - returned: success - sample: {xpath: xpath, namespaces: [namespace1, namespace2], state=present} + description: A dictionary with the original xpath, namespaces and state. 
+ type: dict + returned: success + sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present} backup_file: - description: The name of the backup file that was created - type: str - returned: when O(backup=true) - sample: /path/to/file.xml.1942.2017-08-24@14:16:01~ + description: The name of the backup file that was created. + type: str + returned: when O(backup=true) + sample: /path/to/file.xml.1942.2017-08-24@14:16:01~ count: - description: The count of xpath matches. - type: int - returned: when parameter 'count' is set - sample: 2 + description: The count of xpath matches. + type: int + returned: when parameter O(count) is set + sample: 2 matches: - description: The xpath matches found. - type: list - returned: when parameter 'print_match' is set + description: The xpath matches found. + type: list + returned: when parameter O(print_match) is set msg: - description: A message related to the performed action(s). - type: str - returned: always + description: A message related to the performed action(s). + type: str + returned: always xmlstring: - description: An XML string of the resulting output. - type: str - returned: when parameter 'xmlstring' is set -''' + description: An XML string of the resulting output. + type: str + returned: when parameter O(xmlstring) is set +""" import copy import json From d5237ee4869afa42efab8c255d701422916e07c3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 23 Dec 2024 19:19:50 +0100 Subject: [PATCH 395/482] Add sanity test for action groups (#9294) * Add sanity test for action groups. * Fix interpolation. * Fix message. Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * Improve regex. * Add entry for new 'keycloak' action group. 
Ref: https://github.com/ansible-collections/community.general/pull/9284 --------- Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- tests/sanity/extra/action-group.json | 12 ++ tests/sanity/extra/action-group.json.license | 3 + tests/sanity/extra/action-group.py | 134 +++++++++++++++++++ 3 files changed, 149 insertions(+) create mode 100644 tests/sanity/extra/action-group.json create mode 100644 tests/sanity/extra/action-group.json.license create mode 100755 tests/sanity/extra/action-group.py diff --git a/tests/sanity/extra/action-group.json b/tests/sanity/extra/action-group.json new file mode 100644 index 0000000000..db6a92bcb7 --- /dev/null +++ b/tests/sanity/extra/action-group.json @@ -0,0 +1,12 @@ +{ + "include_symlinks": true, + "prefixes": [ + "meta/runtime.yml", + "plugins/modules/", + "tests/sanity/extra/action-group." + ], + "output": "path-message", + "requirements": [ + "pyyaml" + ] +} diff --git a/tests/sanity/extra/action-group.json.license b/tests/sanity/extra/action-group.json.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/sanity/extra/action-group.json.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/sanity/extra/action-group.py b/tests/sanity/extra/action-group.py new file mode 100755 index 0000000000..9c82ff8619 --- /dev/null +++ b/tests/sanity/extra/action-group.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# Copyright (c) 2024, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +"""Make sure all modules that should show up in the action group.""" + +from __future__ import annotations + +import os +import re +import yaml + + +ACTION_GROUPS = { + # The format is as follows: 
+ # * 'pattern': a regular expression matching all module names potentially belonging to the action group; + # * 'exclusions': a list of modules that are not part of the action group; all other modules matching 'pattern' must be part of it; + # * 'doc_fragment': the docs fragment that documents membership of the action group. + 'consul': { + 'pattern': re.compile('^consul_.*$'), + 'exclusions': [ + 'consul_acl_bootstrap', + 'consul_kv', + ], + 'doc_fragment': 'community.general.consul.actiongroup_consul', + }, + 'keycloak': { + 'pattern': re.compile('^keycloak_.*$'), + 'exclusions': [ + 'keycloak_realm_info', + ], + 'doc_fragment': 'community.general.keycloak.actiongroup_keycloak', + }, + 'proxmox': { + 'pattern': re.compile('^proxmox(_.*)?$'), + 'exclusions': [], + 'doc_fragment': 'community.general.proxmox.actiongroup_proxmox', + }, +} + + +def main(): + """Main entry point.""" + + # Load redirects + meta_runtime = 'meta/runtime.yml' + self_path = 'tests/sanity/extra/action-group.py' + try: + with open(meta_runtime, 'rb') as f: + data = yaml.safe_load(f) + action_groups = data['action_groups'] + except Exception as exc: + print(f'{meta_runtime}: cannot load action groups: {exc}') + return + + for action_group in action_groups: + if action_group not in ACTION_GROUPS: + print(f'{meta_runtime}: found unknown action group {action_group!r}; likely {self_path} needs updating') + for action_group, action_group_data in list(ACTION_GROUPS.items()): + if action_group not in action_groups: + print(f'{meta_runtime}: cannot find action group {action_group!r}; likely {self_path} needs updating') + + modules_directory = 'plugins/modules/' + modules_suffix = '.py' + + for file in os.listdir(modules_directory): + if not file.endswith(modules_suffix): + continue + module_name = file[:-len(modules_suffix)] + + for action_group, action_group_data in ACTION_GROUPS.items(): + action_group_content = action_groups.get(action_group) or [] + path = os.path.join(modules_directory, file) + 
+ if not action_group_data['pattern'].match(module_name): + if module_name in action_group_content: + print(f'{path}: module is in action group {action_group!r} despite not matching its pattern as defined in {self_path}') + continue + + should_be_in_action_group = module_name not in action_group_data['exclusions'] + + if should_be_in_action_group: + if module_name not in action_group_content: + print(f'{meta_runtime}: module {module_name!r} is not part of {action_group!r} action group') + else: + action_group_content.remove(module_name) + + documentation = [] + in_docs = False + with open(path, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith('DOCUMENTATION ='): + in_docs = True + elif line.startswith(("'''", '"""')) and in_docs: + in_docs = False + elif in_docs: + documentation.append(line) + if in_docs: + print(f'{path}: cannot find DOCUMENTATION end') + if not documentation: + print(f'{path}: cannot find DOCUMENTATION') + continue + + try: + docs = yaml.safe_load('\n'.join(documentation)) + if not isinstance(docs, dict): + raise Exception('is not a top-level dictionary') + except Exception as exc: + print(f'{path}: cannot load DOCUMENTATION as YAML: {exc}') + continue + + docs_fragments = docs.get('extends_documentation_fragment') or [] + is_in_action_group = action_group_data['doc_fragment'] in docs_fragments + + if should_be_in_action_group != is_in_action_group: + if should_be_in_action_group: + print( + f'{path}: module does not document itself as part of action group {action_group!r}, but it should;' + f' you need to add {action_group_data["doc_fragment"]} to "extends_documentation_fragment" in DOCUMENTATION' + ) + else: + print(f'{path}: module documents itself as part of action group {action_group!r}, but it should not be') + + for action_group, action_group_data in ACTION_GROUPS.items(): + action_group_content = action_groups.get(action_group) or [] + for module_name in action_group_content: + print( + f'{meta_runtime}: module 
{module_name} mentioned in {action_group!r} action group' + f' does not exist or does not match pattern defined in {self_path}' + ) + + +if __name__ == '__main__': + main() From c141f8688305c78c0dc53636f6906dc879221915 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 24 Dec 2024 09:30:11 +1300 Subject: [PATCH 396/482] utm*: normalize docs (#9335) * utm*: normalize docs * Update plugins/modules/utm_aaa_group.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/utm_aaa_group.py | 311 ++++++----- plugins/modules/utm_aaa_group_info.py | 147 +++--- plugins/modules/utm_ca_host_key_cert.py | 164 +++--- plugins/modules/utm_ca_host_key_cert_info.py | 104 ++-- plugins/modules/utm_dns_host.py | 191 ++++--- .../modules/utm_network_interface_address.py | 148 +++--- .../utm_network_interface_address_info.py | 98 ++-- plugins/modules/utm_proxy_auth_profile.py | 494 +++++++++--------- plugins/modules/utm_proxy_exception.py | 337 ++++++------ plugins/modules/utm_proxy_frontend.py | 397 +++++++------- plugins/modules/utm_proxy_frontend_info.py | 182 ++++--- plugins/modules/utm_proxy_location.py | 293 +++++------ plugins/modules/utm_proxy_location_info.py | 146 +++--- 13 files changed, 1487 insertions(+), 1525 deletions(-) diff --git a/plugins/modules/utm_aaa_group.py b/plugins/modules/utm_aaa_group.py index 9c595284da..b29f3d50af 100644 --- a/plugins/modules/utm_aaa_group.py +++ b/plugins/modules/utm_aaa_group.py @@ -8,120 +8,117 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_aaa_group author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy an aaa group object in Sophos UTM description: - - Create, update or destroy an aaa group object in Sophos UTM. 
- - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy an aaa group object in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. Will be used to identify the entry. - type: str - required: true - adirectory_groups: - description: - - List of adirectory group strings. - type: list - elements: str - default: [] - adirectory_groups_sids: - description: - - Dictionary of group sids. - type: dict - default: {} - backend_match: - description: - - The backend for the group. - type: str - choices: - - none - - adirectory - - edirectory - - radius - - tacacs - - ldap - default: none - comment: - description: - - Comment that describes the AAA group. - type: str - default: '' - dynamic: - description: - - Group type. Is static if none is selected. - type: str - default: none - choices: - - none - - ipsec_dn - - directory_groups - edirectory_groups: - description: - - List of edirectory group strings. - type: list - elements: str - default: [] - ipsec_dn: - description: - - The ipsec dn string. - type: str - default: '' - ldap_attribute: - description: - - The ldap attribute to check against. - type: str - default: '' - ldap_attribute_value: - description: - - The ldap attribute value to check against. - type: str - default: '' - members: - description: - - A list of user ref names (aaa/user). - type: list - elements: str - default: [] - network: - description: - - The network reference name. The objects contains the known ip addresses for the authentication object (network/aaa). - type: str - default: "" - radius_groups: - description: - - A list of radius group strings. - type: list - elements: str - default: [] - tacacs_groups: - description: - - A list of tacacs group strings. 
- type: list - elements: str - default: [] + name: + description: + - The name of the object. Will be used to identify the entry. + type: str + required: true + adirectory_groups: + description: + - List of adirectory group strings. + type: list + elements: str + default: [] + adirectory_groups_sids: + description: + - Dictionary of group sids. + type: dict + default: {} + backend_match: + description: + - The backend for the group. + type: str + choices: + - none + - adirectory + - edirectory + - radius + - tacacs + - ldap + default: none + comment: + description: + - Comment that describes the AAA group. + type: str + default: '' + dynamic: + description: + - Group type. Is static if none is selected. + type: str + default: none + choices: + - none + - ipsec_dn + - directory_groups + edirectory_groups: + description: + - List of edirectory group strings. + type: list + elements: str + default: [] + ipsec_dn: + description: + - The ipsec dn string. + type: str + default: '' + ldap_attribute: + description: + - The ldap attribute to check against. + type: str + default: '' + ldap_attribute_value: + description: + - The ldap attribute value to check against. + type: str + default: '' + members: + description: + - A list of user ref names (aaa/user). + type: list + elements: str + default: [] + network: + description: + - The network reference name. The objects contains the known IP addresses for the authentication object (network/aaa). + type: str + default: "" + radius_groups: + description: + - A list of radius group strings. + type: list + elements: str + default: [] + tacacs_groups: + description: + - A list of tacacs group strings. 
+ type: list + elements: str + default: [] extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM aaa_group community.general.utm_aaa_group: utm_host: sophos.host.name @@ -142,63 +139,63 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created. - returned: success - type: complex - contains: - _ref: - description: The reference name of the object. - type: str - _locked: - description: Whether or not the object is currently locked. - type: bool - _type: - description: The type of the object. - type: str - name: - description: The name of the object. - type: str - adirectory_groups: - description: List of Active Directory Groups. - type: str - adirectory_groups_sids: - description: List of Active Directory Groups SIDS. - type: list - backend_match: - description: The backend to use. - type: str - comment: - description: The comment string. - type: str - dynamic: - description: Whether the group match is ipsec_dn or directory_group. - type: str - edirectory_groups: - description: List of eDirectory Groups. - type: str - ipsec_dn: - description: ipsec_dn identifier to match. - type: str - ldap_attribute: - description: The LDAP Attribute to match against. - type: str - ldap_attribute_value: - description: The LDAP Attribute Value to match against. - type: str - members: - description: List of member identifiers of the group. - type: list - network: - description: The identifier of the network (network/aaa). - type: str - radius_group: - description: The radius group identifier. - type: str - tacacs_group: - description: The tacacs group identifier. - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. 
+ type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + adirectory_groups: + description: List of Active Directory Groups. + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS. + type: list + backend_match: + description: The backend to use. + type: str + comment: + description: The comment string. + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group. + type: str + edirectory_groups: + description: List of eDirectory Groups. + type: str + ipsec_dn: + description: Ipsec_dn identifier to match. + type: str + ldap_attribute: + description: The LDAP Attribute to match against. + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against. + type: str + members: + description: List of member identifiers of the group. + type: list + network: + description: The identifier of the network (network/aaa). + type: str + radius_group: + description: The radius group identifier. + type: str + tacacs_group: + description: The tacacs group identifier. + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_aaa_group_info.py b/plugins/modules/utm_aaa_group_info.py index 37e01c736c..4f073176f2 100644 --- a/plugins/modules/utm_aaa_group_info.py +++ b/plugins/modules/utm_aaa_group_info.py @@ -10,38 +10,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_aaa_group_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Get info for reverse_proxy frontend entry in Sophos UTM description: - - get info for a reverse_proxy frontend entry in SOPHOS UTM. 
- + - Get info for a reverse_proxy frontend entry in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Remove UTM aaa_group community.general.utm_aaa_group_info: utm_host: sophos.host.name @@ -49,63 +46,63 @@ EXAMPLES = """ name: TestAAAGroupEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - adirectory_groups: - description: List of Active Directory Groups - type: str - adirectory_groups_sids: - description: List of Active Directory Groups SIDS - type: list - backend_match: - description: The backend to use - type: str - comment: - description: The comment string - type: str - dynamic: - description: Whether the group match is ipsec_dn or directory_group - type: str - edirectory_groups: - description: List of eDirectory Groups - type: str - ipsec_dn: - description: ipsec_dn identifier to match - type: str - ldap_attribute: - description: The LDAP Attribute to match against - type: str - 
ldap_attribute_value: - description: The LDAP Attribute Value to match against - type: str - members: - description: List of member identifiers of the group - type: list - network: - description: The identifier of the network (network/aaa) - type: str - radius_group: - description: The radius group identifier - type: str - tacacs_group: - description: The tacacs group identifier - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + adirectory_groups: + description: List of Active Directory Groups. + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS. + type: list + backend_match: + description: The backend to use. + type: str + comment: + description: The comment string. + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group. + type: str + edirectory_groups: + description: List of eDirectory Groups. + type: str + ipsec_dn: + description: Ipsec_dn identifier to match. + type: str + ldap_attribute: + description: The LDAP Attribute to match against. + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against. + type: str + members: + description: List of member identifiers of the group. + type: list + network: + description: The identifier of the network (network/aaa). + type: str + radius_group: + description: The radius group identifier. + type: str + tacacs_group: + description: The tacacs group identifier. 
+ type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_ca_host_key_cert.py b/plugins/modules/utm_ca_host_key_cert.py index b944e83124..b67531c061 100644 --- a/plugins/modules/utm_ca_host_key_cert.py +++ b/plugins/modules/utm_ca_host_key_cert.py @@ -9,67 +9,64 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_ca_host_key_cert author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Create, update or destroy ca host_key_cert entry in Sophos UTM description: - - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. Will be used to identify the entry. - required: true - type: str - ca: - description: - - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - required: true - type: str - meta: - description: - - A reference to an existing utm_ca_meta_x509 object. - required: true - type: str - certificate: - description: - - The certificate in PEM format. - required: true - type: str - comment: - description: - - Optional comment string. - type: str - encrypted: - description: - - Optionally enable encryption. - default: false - type: bool - key: - description: - - Optional private key in PEM format. - type: str + name: + description: + - The name of the object. Will be used to identify the entry. 
+ required: true + type: str + ca: + description: + - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + required: true + type: str + meta: + description: + - A reference to an existing utm_ca_meta_x509 object. + required: true + type: str + certificate: + description: + - The certificate in PEM format. + required: true + type: str + comment: + description: + - Optional comment string. + type: str + encrypted: + description: + - Optionally enable encryption. + default: false + type: bool + key: + description: + - Optional private key in PEM format. + type: str extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create a ca_host_key_cert entry community.general.utm_ca_host_key_cert: utm_host: sophos.host.name @@ -98,45 +95,44 @@ EXAMPLES = """ utm_token: abcdefghijklmno1234 name: TestHostKeyCertEntry state: info - """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - ca: - description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - type: str - meta: - description: A reference to an existing utm_ca_meta_x509 object. - type: str - certificate: - description: The certificate in PEM format - type: str - comment: - description: Comment string (may be empty string) - type: str - encrypted: - description: If encryption is enabled - type: bool - key: - description: Private key in PEM format (may be empty string) - type: str + description: The utm object that was created. 
+ returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format. + type: str + comment: + description: Comment string (may be empty string). + type: str + encrypted: + description: If encryption is enabled. + type: bool + key: + description: Private key in PEM format (may be empty string). + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_ca_host_key_cert_info.py b/plugins/modules/utm_ca_host_key_cert_info.py index d81eede69f..cab6657ab6 100644 --- a/plugins/modules/utm_ca_host_key_cert_info.py +++ b/plugins/modules/utm_ca_host_key_cert_info.py @@ -9,37 +9,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_ca_host_key_cert_info author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Get info for a ca host_key_cert entry in Sophos UTM description: - - Get info for a ca host_key_cert entry in SOPHOS UTM. - + - Get info for a ca host_key_cert entry in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. 
Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get info for a ca host_key_cert entry community.general.utm_ca_host_key_cert_info: utm_host: sophos.host.name @@ -47,42 +45,42 @@ EXAMPLES = """ name: TestHostKeyCertEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - ca: - description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - type: str - meta: - description: A reference to an existing utm_ca_meta_x509 object. - type: str - certificate: - description: The certificate in PEM format - type: str - comment: - description: Comment string (may be empty string) - type: str - encrypted: - description: If encryption is enabled - type: bool - key: - description: Private key in PEM format (may be empty string) - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. 
+ type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format. + type: str + comment: + description: Comment string (may be empty string). + type: str + encrypted: + description: If encryption is enabled. + type: bool + key: + description: Private key in PEM format (may be empty string). + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_dns_host.py b/plugins/modules/utm_dns_host.py index 6b3725557b..bbb93e9eb0 100644 --- a/plugins/modules/utm_dns_host.py +++ b/plugins/modules/utm_dns_host.py @@ -8,78 +8,75 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_dns_host author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy dns entry in Sophos UTM description: - - Create, update or destroy a dns entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a dns entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - address: - type: str - description: - - The IPV4 Address of the entry. Can be left empty for automatic resolving. - default: 0.0.0.0 - address6: - type: str - description: - - The IPV6 Address of the entry. Can be left empty for automatic resolving. 
- default: "::" - comment: - type: str - description: - - An optional comment to add to the dns host object - default: '' - hostname: - type: str - description: - - The hostname for the dns host object - interface: - type: str - description: - - The reference name of the interface to use. If not provided the default interface will be used - default: '' - resolved: - description: - - whether the hostname's ipv4 address is already resolved or not - default: false - type: bool - resolved6: - description: - - whether the hostname's ipv6 address is already resolved or not - default: false - type: bool - timeout: - type: int - description: - - the timeout for the utm to resolve the ip address for the hostname again - default: 0 + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + address: + type: str + description: + - The IPV4 Address of the entry. Can be left empty for automatic resolving. + default: 0.0.0.0 + address6: + type: str + description: + - The IPV6 Address of the entry. Can be left empty for automatic resolving. + default: "::" + comment: + type: str + description: + - An optional comment to add to the dns host object. + default: '' + hostname: + type: str + description: + - The hostname for the dns host object. + interface: + type: str + description: + - The reference name of the interface to use. If not provided the default interface will be used. + default: '' + resolved: + description: + - Whether the hostname's ipv4 address is already resolved or not. + default: false + type: bool + resolved6: + description: + - Whether the hostname's ipv6 address is already resolved or not. + default: false + type: bool + timeout: + type: int + description: + - The timeout for the utm to resolve the ip address for the hostname again. 
+ default: 0 extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM dns host entry community.general.utm_dns_host: utm_host: sophos.host.name @@ -96,45 +93,45 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - name: - description: The name of the object - type: str - address: - description: The ipv4 address of the object - type: str - address6: - description: The ipv6 address of the object - type: str - comment: - description: The comment string - type: str - hostname: - description: The hostname of the object - type: str - interface: - description: The reference name of the interface the object is associated with - type: str - resolved: - description: Whether the ipv4 address is resolved or not - type: bool - resolved6: - description: Whether the ipv6 address is resolved or not - type: bool - timeout: - description: The timeout until a new resolving will be attempted - type: int + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + name: + description: The name of the object. + type: str + address: + description: The ipv4 address of the object. + type: str + address6: + description: The ipv6 address of the object. + type: str + comment: + description: The comment string. + type: str + hostname: + description: The hostname of the object. + type: str + interface: + description: The reference name of the interface the object is associated with. 
+ type: str + resolved: + description: Whether the ipv4 address is resolved or not. + type: bool + resolved6: + description: Whether the ipv6 address is resolved or not. + type: bool + timeout: + description: The timeout until a new resolving will be attempted. + type: int """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_network_interface_address.py b/plugins/modules/utm_network_interface_address.py index a85a46aeab..1e3d2ee5c3 100644 --- a/plugins/modules/utm_network_interface_address.py +++ b/plugins/modules/utm_network_interface_address.py @@ -8,62 +8,58 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_network_interface_address author: - - Juergen Wiebe (@steamx) + - Juergen Wiebe (@steamx) short_description: Create, update or destroy network/interface_address object description: - - Create, update or destroy a network/interface_address object in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a network/interface_address object in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - address: - type: str - description: - - The ip4 address of the network/interface_address object. - required: true - address6: - type: str - description: - - The ip6 address of the network/interface_address object. 
- required: false - comment: - type: str - description: - - An optional comment to add to the object - default: '' - resolved: - type: bool - description: - - Whether or not the object is resolved - resolved6: - type: bool - description: - - Whether or not the object is resolved - + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + address: + type: str + description: + - The ip4 address of the network/interface_address object. + required: true + address6: + type: str + description: + - The ip6 address of the network/interface_address object. + required: false + comment: + type: str + description: + - An optional comment to add to the object. + default: '' + resolved: + type: bool + description: + - Whether or not the object is resolved. + resolved6: + type: bool + description: + - Whether or not the object is resolved. extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create a network interface address utm_proxy_backend: utm_host: sophos.host.name @@ -81,39 +77,39 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - address: - description: The ip4 address of the network/interface_address object - type: str - address6: - description: The ip6 address of the network/interface_address object - type: str - comment: - description: The comment string - type: str - resolved: - description: Whether or not the object is resolved - type: bool - resolved6: - description: Whether or 
not the object is resolved - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + address: + description: The ip4 address of the network/interface_address object. + type: str + address6: + description: The ip6 address of the network/interface_address object. + type: str + comment: + description: The comment string. + type: str + resolved: + description: Whether or not the object is resolved. + type: bool + resolved6: + description: Whether or not the object is resolved. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_network_interface_address_info.py b/plugins/modules/utm_network_interface_address_info.py index 9dc08ad094..b9c394c848 100644 --- a/plugins/modules/utm_network_interface_address_info.py +++ b/plugins/modules/utm_network_interface_address_info.py @@ -8,37 +8,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_network_interface_address_info author: - - Juergen Wiebe (@steamx) + - Juergen Wiebe (@steamx) short_description: Get info for a network/interface_address object description: - - Get info for a network/interface_address object in SOPHOS UTM. - + - Get info for a network/interface_address object in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. 
Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get network interface address info utm_proxy_interface_address_info: utm_host: sophos.host.name @@ -46,39 +44,39 @@ EXAMPLES = """ name: TestNetworkInterfaceAddress """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - address: - description: The ip4 address of the network/interface_address object - type: str - address6: - description: The ip6 address of the network/interface_address object - type: str - comment: - description: The comment string - type: str - resolved: - description: Whether or not the object is resolved - type: bool - resolved6: - description: Whether or not the object is resolved - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + address: + description: The ip4 address of the network/interface_address object. + type: str + address6: + description: The ip6 address of the network/interface_address object. 
+ type: str + comment: + description: The comment string. + type: str + resolved: + description: Whether or not the object is resolved. + type: bool + resolved6: + description: Whether or not the object is resolved. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_auth_profile.py b/plugins/modules/utm_proxy_auth_profile.py index 3b482483bf..207c4ba156 100644 --- a/plugins/modules/utm_proxy_auth_profile.py +++ b/plugins/modules/utm_proxy_auth_profile.py @@ -9,183 +9,180 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_auth_profile author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Create, update or destroy reverse_proxy auth_profile entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. 
Will be used to identify the entry - required: true - aaa: - type: list - elements: str - description: - - List of references to utm_aaa objects (allowed users or groups) - required: true - basic_prompt: - type: str - description: - - The message in the basic authentication prompt - required: true - backend_mode: - type: str - description: - - Specifies if the backend server needs authentication ([Basic|None]) - default: None - choices: - - Basic - - None - backend_strip_basic_auth: - description: - - Should the login data be stripped when proxying the request to the backend host - type: bool - default: true - backend_user_prefix: - type: str - description: - - Prefix string to prepend to the username for backend authentication - default: "" - backend_user_suffix: - type: str - description: - - Suffix string to append to the username for backend authentication - default: "" - comment: - type: str - description: - - Optional comment string - default: "" - frontend_cookie: - type: str - description: - - Frontend cookie name - frontend_cookie_secret: - type: str - description: - - Frontend cookie secret - frontend_form: - type: str - description: - - Frontend authentication form name - frontend_form_template: - type: str - description: - - Frontend authentication form template - default: "" - frontend_login: - type: str - description: - - Frontend login name - frontend_logout: - type: str - description: - - Frontend logout name - frontend_mode: - type: str - description: - - Frontend authentication mode (Form|Basic) - default: Basic - choices: - - Basic - - Form - frontend_realm: - type: str - description: - - Frontend authentication realm - frontend_session_allow_persistency: - description: - - Allow session persistency - type: bool - default: false - frontend_session_lifetime: - type: int - description: - - session lifetime - required: true - frontend_session_lifetime_limited: - description: - - Specifies if limitation of session lifetime is active - type: bool - 
default: true - frontend_session_lifetime_scope: - type: str - description: - - scope for frontend_session_lifetime (days|hours|minutes) - default: hours - choices: - - days - - hours - - minutes - frontend_session_timeout: - type: int - description: - - session timeout - required: true - frontend_session_timeout_enabled: - description: - - Specifies if session timeout is active - type: bool - default: true - frontend_session_timeout_scope: - type: str - description: - - scope for frontend_session_timeout (days|hours|minutes) - default: minutes - choices: - - days - - hours - - minutes - logout_delegation_urls: - type: list - elements: str - description: - - List of logout URLs that logouts are delegated to - default: [] - logout_mode: - type: str - description: - - Mode of logout (None|Delegation) - default: None - choices: - - None - - Delegation - redirect_to_requested_url: - description: - - Should a redirect to the requested URL be made - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + aaa: + type: list + elements: str + description: + - List of references to utm_aaa objects (allowed users or groups). + required: true + basic_prompt: + type: str + description: + - The message in the basic authentication prompt. + required: true + backend_mode: + type: str + description: + - Specifies if the backend server needs authentication ([Basic|None]). + default: None + choices: + - Basic + - None + backend_strip_basic_auth: + description: + - Should the login data be stripped when proxying the request to the backend host. + type: bool + default: true + backend_user_prefix: + type: str + description: + - Prefix string to prepend to the username for backend authentication. + default: "" + backend_user_suffix: + type: str + description: + - Suffix string to append to the username for backend authentication. 
+ default: "" + comment: + type: str + description: + - Optional comment string. + default: "" + frontend_cookie: + type: str + description: + - Frontend cookie name. + frontend_cookie_secret: + type: str + description: + - Frontend cookie secret. + frontend_form: + type: str + description: + - Frontend authentication form name. + frontend_form_template: + type: str + description: + - Frontend authentication form template. + default: "" + frontend_login: + type: str + description: + - Frontend login name. + frontend_logout: + type: str + description: + - Frontend logout name. + frontend_mode: + type: str + description: + - Frontend authentication mode (Form|Basic). + default: Basic + choices: + - Basic + - Form + frontend_realm: + type: str + description: + - Frontend authentication realm. + frontend_session_allow_persistency: + description: + - Allow session persistency. + type: bool + default: false + frontend_session_lifetime: + type: int + description: + - Session lifetime. + required: true + frontend_session_lifetime_limited: + description: + - Specifies if limitation of session lifetime is active. + type: bool + default: true + frontend_session_lifetime_scope: + type: str + description: + - Scope for frontend_session_lifetime (days|hours|minutes). + default: hours + choices: + - days + - hours + - minutes + frontend_session_timeout: + type: int + description: + - Session timeout. + required: true + frontend_session_timeout_enabled: + description: + - Specifies if session timeout is active. + type: bool + default: true + frontend_session_timeout_scope: + type: str + description: + - Scope for frontend_session_timeout (days|hours|minutes). + default: minutes + choices: + - days + - hours + - minutes + logout_delegation_urls: + type: list + elements: str + description: + - List of logout URLs that logouts are delegated to. + default: [] + logout_mode: + type: str + description: + - Mode of logout (None|Delegation). 
+ default: None + choices: + - None + - Delegation + redirect_to_requested_url: + description: + - Should a redirect to the requested URL be made. + type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_auth_profile community.general.utm_proxy_auth_profile: utm_host: sophos.host.name utm_token: abcdefghijklmno1234 name: TestAuthProfileEntry - aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING] + aaa: [REF_OBJECT_STRING, REF_ANOTHEROBJECT_STRING] basic_prompt: "Authentication required: Please login" frontend_session_lifetime: 1 frontend_session_timeout: 1 @@ -204,99 +201,98 @@ EXAMPLES = """ utm_token: abcdefghijklmno1234 name: TestAuthProfileEntry state: info - """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - aaa: - description: List of references to utm_aaa objects (allowed users or groups) - type: list - basic_prompt: - description: The message in the basic authentication prompt - type: str - backend_mode: - description: Specifies if the backend server needs authentication ([Basic|None]) - type: str - backend_strip_basic_auth: - description: Should the login data be stripped when proxying the request to the backend host - type: bool - backend_user_prefix: - description: Prefix string to prepend to the username for backend authentication - type: str - backend_user_suffix: - description: Suffix string to append to the username for backend authentication - type: str - comment: - description: Optional comment string - type: str 
- frontend_cookie: - description: Frontend cookie name - type: str - frontend_form: - description: Frontend authentication form name - type: str - frontend_form_template: - description: Frontend authentication form template - type: str - frontend_login: - description: Frontend login name - type: str - frontend_logout: - description: Frontend logout name - type: str - frontend_mode: - description: Frontend authentication mode (Form|Basic) - type: str - frontend_realm: - description: Frontend authentication realm - type: str - frontend_session_allow_persistency: - description: Allow session persistency - type: bool - frontend_session_lifetime: - description: session lifetime - type: int - frontend_session_lifetime_limited: - description: Specifies if limitation of session lifetime is active - type: bool - frontend_session_lifetime_scope: - description: scope for frontend_session_lifetime (days|hours|minutes) - type: str - frontend_session_timeout: - description: session timeout - type: int - frontend_session_timeout_enabled: - description: Specifies if session timeout is active - type: bool - frontend_session_timeout_scope: - description: scope for frontend_session_timeout (days|hours|minutes) - type: str - logout_delegation_urls: - description: List of logout URLs that logouts are delegated to - type: list - logout_mode: - description: Mode of logout (None|Delegation) - type: str - redirect_to_requested_url: - description: Should a redirect to the requested URL be made - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + aaa: + description: List of references to utm_aaa objects (allowed users or groups). 
+ type: list + basic_prompt: + description: The message in the basic authentication prompt. + type: str + backend_mode: + description: Specifies if the backend server needs authentication ([Basic|None]). + type: str + backend_strip_basic_auth: + description: Should the login data be stripped when proxying the request to the backend host. + type: bool + backend_user_prefix: + description: Prefix string to prepend to the username for backend authentication. + type: str + backend_user_suffix: + description: Suffix string to append to the username for backend authentication. + type: str + comment: + description: Optional comment string. + type: str + frontend_cookie: + description: Frontend cookie name. + type: str + frontend_form: + description: Frontend authentication form name. + type: str + frontend_form_template: + description: Frontend authentication form template. + type: str + frontend_login: + description: Frontend login name. + type: str + frontend_logout: + description: Frontend logout name. + type: str + frontend_mode: + description: Frontend authentication mode (Form|Basic). + type: str + frontend_realm: + description: Frontend authentication realm. + type: str + frontend_session_allow_persistency: + description: Allow session persistency. + type: bool + frontend_session_lifetime: + description: Session lifetime. + type: int + frontend_session_lifetime_limited: + description: Specifies if limitation of session lifetime is active. + type: bool + frontend_session_lifetime_scope: + description: Scope for frontend_session_lifetime (days|hours|minutes). + type: str + frontend_session_timeout: + description: Session timeout. + type: int + frontend_session_timeout_enabled: + description: Specifies if session timeout is active. + type: bool + frontend_session_timeout_scope: + description: Scope for frontend_session_timeout (days|hours|minutes). + type: str + logout_delegation_urls: + description: List of logout URLs that logouts are delegated to. 
+ type: list + logout_mode: + description: Mode of logout (None|Delegation). + type: str + redirect_to_requested_url: + description: Should a redirect to the requested URL be made. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_exception.py b/plugins/modules/utm_proxy_exception.py index a0a3f85b5b..96cb592e59 100644 --- a/plugins/modules/utm_proxy_exception.py +++ b/plugins/modules/utm_proxy_exception.py @@ -9,130 +9,127 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_exception author: - - Sebastian Schenzel (@RickS-C137) + - Sebastian Schenzel (@RickS-C137) short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. 
Will be used to identify the entry - required: true - type: str - op: - description: - - The operand to be used with the entries of the path parameter - default: 'AND' - choices: - - 'AND' - - 'OR' - required: false - type: str - path: - description: - - The paths the exception in the reverse proxy is defined for - type: list - elements: str - default: [] - required: false - skip_custom_threats_filters: - description: - - A list of threats to be skipped - type: list - elements: str - default: [] - required: false - skip_threats_filter_categories: - description: - - Define which categories of threats are skipped - type: list - elements: str - default: [] - required: false - skipav: - description: - - Skip the Antivirus Scanning - default: false - type: bool - required: false - skipbadclients: - description: - - Block clients with bad reputation - default: false - type: bool - required: false - skipcookie: - description: - - Skip the Cookie Signing check - default: false - type: bool - required: false - skipform: - description: - - Enable form hardening - default: false - type: bool - required: false - skipform_missingtoken: - description: - - Enable form hardening with missing tokens - default: false - type: bool - required: false - skiphtmlrewrite: - description: - - Protection against SQL - default: false - type: bool - required: false - skiptft: - description: - - Enable true file type control - default: false - type: bool - required: false - skipurl: - description: - - Enable static URL hardening - default: false - type: bool - required: false - source: - description: - - Define which categories of threats are skipped - type: list - elements: str - default: [] - required: false - status: - description: - - Status of the exception rule set - default: true - type: bool - required: false + name: + description: + - The name of the object. Will be used to identify the entry. 
+ required: true + type: str + op: + description: + - The operand to be used with the entries of the path parameter. + default: 'AND' + choices: + - 'AND' + - 'OR' + required: false + type: str + path: + description: + - The paths the exception in the reverse proxy is defined for. + type: list + elements: str + default: [] + required: false + skip_custom_threats_filters: + description: + - A list of threats to be skipped. + type: list + elements: str + default: [] + required: false + skip_threats_filter_categories: + description: + - Define which categories of threats are skipped. + type: list + elements: str + default: [] + required: false + skipav: + description: + - Skip the Antivirus Scanning. + default: false + type: bool + required: false + skipbadclients: + description: + - Block clients with bad reputation. + default: false + type: bool + required: false + skipcookie: + description: + - Skip the Cookie Signing check. + default: false + type: bool + required: false + skipform: + description: + - Enable form hardening. + default: false + type: bool + required: false + skipform_missingtoken: + description: + - Enable form hardening with missing tokens. + default: false + type: bool + required: false + skiphtmlrewrite: + description: + - Protection against SQL. + default: false + type: bool + required: false + skiptft: + description: + - Enable true file type control. + default: false + type: bool + required: false + skipurl: + description: + - Enable static URL hardening. + default: false + type: bool + required: false + source: + description: + - Define which categories of threats are skipped. + type: list + elements: str + default: [] + required: false + status: + description: + - Status of the exception rule set. 
+ default: true + type: bool + required: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_exception community.general.utm_proxy_exception: utm_host: sophos.host.name @@ -149,66 +146,66 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - comment: - description: The optional comment string - type: str - op: - description: The operand to be used with the entries of the path parameter - type: str - path: - description: The paths the exception in the reverse proxy is defined for - type: list - skip_custom_threats_filters: - description: A list of threats to be skipped - type: list - skip_threats_filter_categories: - description: Define which categories of threats are skipped - type: list - skipav: - description: Skip the Antivirus Scanning - type: bool - skipbadclients: - description: Block clients with bad reputation - type: bool - skipcookie: - description: Skip the Cookie Signing check - type: bool - skipform: - description: Enable form hardening - type: bool - skipform_missingtoken: - description: Enable form hardening with missing tokens - type: bool - skiphtmlrewrite: - description: Protection against SQL - type: bool - skiptft: - description: Enable true file type control - type: bool - skipurl: - description: Enable static URL hardening - type: bool - source: - description: Define which categories of threats are skipped - type: list + description: The utm object that was created. 
+ returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + comment: + description: The optional comment string. + type: str + op: + description: The operand to be used with the entries of the path parameter. + type: str + path: + description: The paths the exception in the reverse proxy is defined for. + type: list + skip_custom_threats_filters: + description: A list of threats to be skipped. + type: list + skip_threats_filter_categories: + description: Define which categories of threats are skipped. + type: list + skipav: + description: Skip the Antivirus Scanning. + type: bool + skipbadclients: + description: Block clients with bad reputation. + type: bool + skipcookie: + description: Skip the Cookie Signing check. + type: bool + skipform: + description: Enable form hardening. + type: bool + skipform_missingtoken: + description: Enable form hardening with missing tokens. + type: bool + skiphtmlrewrite: + description: Protection against SQL. + type: bool + skiptft: + description: Enable true file type control. + type: bool + skipurl: + description: Enable static URL hardening. + type: bool + source: + description: Define which categories of threats are skipped. 
+ type: list """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_frontend.py b/plugins/modules/utm_proxy_frontend.py index 22a773fef8..1c3489f493 100644 --- a/plugins/modules/utm_proxy_frontend.py +++ b/plugins/modules/utm_proxy_frontend.py @@ -9,145 +9,142 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_frontend author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - add_content_type_header : - description: - - Whether to add the content type header or not - type: bool - default: false - address: - type: str - description: - - The reference name of the network/interface_address object. - default: REF_DefaultInternalAddress - allowed_networks: - type: list - elements: str - description: - - A list of reference names for the allowed networks. - default: ['REF_NetworkAny'] - certificate: - type: str - description: - - The reference name of the ca/host_key_cert object. 
- default: "" - comment: - type: str - description: - - An optional comment to add to the object - default: "" - disable_compression: - description: - - Whether to enable the compression - type: bool - default: false - domain: - type: list - elements: str - description: - - A list of domain names for the frontend object - exceptions: - type: list - elements: str - description: - - A list of exception ref names (reverse_proxy/exception) - default: [] - htmlrewrite: - description: - - Whether to enable html rewrite or not - type: bool - default: false - htmlrewrite_cookies: - description: - - Whether to enable html rewrite cookie or not - type: bool - default: false - implicitredirect: - description: - - Whether to enable implicit redirection or not - type: bool - default: false - lbmethod: - type: str - description: - - Which loadbalancer method should be used - choices: - - "" - - bybusyness - - bytraffic - - byrequests - default: bybusyness - locations: - type: list - elements: str - description: - - A list of location ref names (reverse_proxy/location) - default: [] - port: - type: int - description: - - The frontend http port - default: 80 - preservehost: - description: - - Whether to preserve host header - type: bool - default: false - profile: - type: str - description: - - The reference string of the reverse_proxy/profile - default: "" - status: - description: - - Whether to activate the frontend entry or not - type: bool - default: true - type: - type: str - description: - - Which protocol should be used - choices: - - http - - https - default: http - xheaders: - description: - - Whether to pass the host header or not - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + add_content_type_header: + description: + - Whether to add the content type header or not. 
+ type: bool + default: false + address: + type: str + description: + - The reference name of the network/interface_address object. + default: REF_DefaultInternalAddress + allowed_networks: + type: list + elements: str + description: + - A list of reference names for the allowed networks. + default: ['REF_NetworkAny'] + certificate: + type: str + description: + - The reference name of the ca/host_key_cert object. + default: "" + comment: + type: str + description: + - An optional comment to add to the object. + default: "" + disable_compression: + description: + - Whether to enable the compression. + type: bool + default: false + domain: + type: list + elements: str + description: + - A list of domain names for the frontend object. + exceptions: + type: list + elements: str + description: + - A list of exception ref names (reverse_proxy/exception). + default: [] + htmlrewrite: + description: + - Whether to enable html rewrite or not. + type: bool + default: false + htmlrewrite_cookies: + description: + - Whether to enable html rewrite cookie or not. + type: bool + default: false + implicitredirect: + description: + - Whether to enable implicit redirection or not. + type: bool + default: false + lbmethod: + type: str + description: + - Which loadbalancer method should be used. + choices: + - "" + - bybusyness + - bytraffic + - byrequests + default: bybusyness + locations: + type: list + elements: str + description: + - A list of location ref names (reverse_proxy/location). + default: [] + port: + type: int + description: + - The frontend http port. + default: 80 + preservehost: + description: + - Whether to preserve host header. + type: bool + default: false + profile: + type: str + description: + - The reference string of the reverse_proxy/profile. + default: "" + status: + description: + - Whether to activate the frontend entry or not. + type: bool + default: true + type: + type: str + description: + - Which protocol should be used. 
+ choices: + - http + - https + default: http + xheaders: + description: + - Whether to pass the host header or not. + type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create utm proxy_frontend community.general.utm_proxy_frontend: utm_host: sophos.host.name @@ -164,81 +161,81 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - add_content_type_header: - description: Whether to add the content type header - type: bool - address: - description: The reference name of the address - type: str - allowed_networks: - description: List of reference names of networks associated - type: list - certificate: - description: Reference name of certificate (ca/host_key_cert) - type: str - comment: - description: The comment string - type: str - disable_compression: - description: State of compression support - type: bool - domain: - description: List of hostnames - type: list - exceptions: - description: List of associated proxy exceptions - type: list - htmlrewrite: - description: State of html rewrite - type: bool - htmlrewrite_cookies: - description: Whether the html rewrite cookie will be set - type: bool - implicitredirect: - description: Whether to use implicit redirection - type: bool - lbmethod: - description: The method of loadbalancer to use - type: str - locations: - description: The reference names of reverse_proxy/locations associated with the object - type: list - port: - description: The port of the frontend 
connection - type: int - preservehost: - description: Preserve host header - type: bool - profile: - description: The associated reverse_proxy/profile - type: str - status: - description: Whether the frontend object is active or not - type: bool - type: - description: The connection type - type: str - xheaders: - description: The xheaders state - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + add_content_type_header: + description: Whether to add the content type header. + type: bool + address: + description: The reference name of the address. + type: str + allowed_networks: + description: List of reference names of networks associated. + type: list + certificate: + description: Reference name of certificate (ca/host_key_cert). + type: str + comment: + description: The comment string. + type: str + disable_compression: + description: State of compression support. + type: bool + domain: + description: List of hostnames. + type: list + exceptions: + description: List of associated proxy exceptions. + type: list + htmlrewrite: + description: State of html rewrite. + type: bool + htmlrewrite_cookies: + description: Whether the html rewrite cookie will be set. + type: bool + implicitredirect: + description: Whether to use implicit redirection. + type: bool + lbmethod: + description: The method of loadbalancer to use. + type: str + locations: + description: The reference names of reverse_proxy/locations associated with the object. + type: list + port: + description: The port of the frontend connection. + type: int + preservehost: + description: Preserve host header. + type: bool + profile: + description: The associated reverse_proxy/profile. 
+ type: str + status: + description: Whether the frontend object is active or not. + type: bool + type: + description: The connection type. + type: str + xheaders: + description: The xheaders state. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_frontend_info.py b/plugins/modules/utm_proxy_frontend_info.py index 0435ef9494..0709cad01e 100644 --- a/plugins/modules/utm_proxy_frontend_info.py +++ b/plugins/modules/utm_proxy_frontend_info.py @@ -9,38 +9,36 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_frontend_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy frontend entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy frontend entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - version_added: 3.3.0 + check_mode: + version_added: 3.3.0 # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get utm proxy_frontend community.general.utm_proxy_frontend_info: utm_host: sophos.host.name @@ -49,81 +47,81 @@ EXAMPLES = """ host: REF_OBJECT_STRING """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - add_content_type_header: - description: Whether to add the content type header - type: bool - address: - description: The reference name of the address - type: str - allowed_networks: - description: List of reference names of networks associated - type: list - certificate: - description: Reference name of certificate (ca/host_key_cert) - type: str - comment: - description: The comment string - type: str - disable_compression: - description: State of compression support - type: bool - domain: - description: List of hostnames - type: list - exceptions: - description: List of associated proxy exceptions - type: list - htmlrewrite: - description: State of html rewrite - type: bool - htmlrewrite_cookies: - description: whether the html rewrite cookie will be set - type: bool - implicitredirect: - description: whether to use implicit redirection - type: bool - lbmethod: - description: The method of loadbalancer to use - type: str - locations: - description: The reference names of reverse_proxy/locations associated with the object - type: list - port: - description: The port of the frontend connection - type: int - 
preservehost: - description: Preserve host header - type: bool - profile: - description: The associated reverse_proxy/profile - type: str - status: - description: Whether the frontend object is active or not - type: bool - type: - description: The connection type - type: str - xheaders: - description: The xheaders state - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + add_content_type_header: + description: Whether to add the content type header. + type: bool + address: + description: The reference name of the address. + type: str + allowed_networks: + description: List of reference names of networks associated. + type: list + certificate: + description: Reference name of certificate (ca/host_key_cert). + type: str + comment: + description: The comment string. + type: str + disable_compression: + description: State of compression support. + type: bool + domain: + description: List of hostnames. + type: list + exceptions: + description: List of associated proxy exceptions. + type: list + htmlrewrite: + description: State of html rewrite. + type: bool + htmlrewrite_cookies: + description: Whether the html rewrite cookie will be set. + type: bool + implicitredirect: + description: Whether to use implicit redirection. + type: bool + lbmethod: + description: The method of loadbalancer to use. + type: str + locations: + description: The reference names of reverse_proxy/locations associated with the object. + type: list + port: + description: The port of the frontend connection. + type: int + preservehost: + description: Preserve host header. + type: bool + profile: + description: The associated reverse_proxy/profile. 
+ type: str + status: + description: Whether the frontend object is active or not. + type: bool + type: + description: The connection type. + type: str + xheaders: + description: The xheaders state. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_location.py b/plugins/modules/utm_proxy_location.py index 736f564d58..944050bfb6 100644 --- a/plugins/modules/utm_proxy_location.py +++ b/plugins/modules/utm_proxy_location.py @@ -9,111 +9,108 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_location author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. 
Will be used to identify the entry - required: true - access_control: - description: - - whether to activate the access control for the location - type: str - default: '0' - choices: - - '0' - - '1' - allowed_networks: - description: - - A list of allowed networks - type: list - elements: str - default: - - REF_NetworkAny - auth_profile: - type: str - description: - - The reference name of the auth profile - default: '' - backend: - type: list - elements: str - description: - - A list of backends that are connected with this location declaration - default: [] - be_path: - type: str - description: - - The path of the backend - default: '' - comment: - type: str - description: - - The optional comment string - default: '' - denied_networks: - type: list - elements: str - description: - - A list of denied network references - default: [] - hot_standby: - description: - - Activate hot standby mode - type: bool - default: false - path: - type: str - description: - - The path of the location - default: "/" - status: - description: - - Whether the location is active or not - type: bool - default: true - stickysession_id: - type: str - description: - - The stickysession id - default: ROUTEID - stickysession_status: - description: - - Enable the stickysession - type: bool - default: false - websocket_passthrough: - description: - - Enable the websocket passthrough - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + access_control: + description: + - Whether to activate the access control for the location. + type: str + default: '0' + choices: + - '0' + - '1' + allowed_networks: + description: + - A list of allowed networks. + type: list + elements: str + default: + - REF_NetworkAny + auth_profile: + type: str + description: + - The reference name of the auth profile. 
+ default: '' + backend: + type: list + elements: str + description: + - A list of backends that are connected with this location declaration. + default: [] + be_path: + type: str + description: + - The path of the backend. + default: '' + comment: + type: str + description: + - The optional comment string. + default: '' + denied_networks: + type: list + elements: str + description: + - A list of denied network references. + default: [] + hot_standby: + description: + - Activate hot standby mode. + type: bool + default: false + path: + type: str + description: + - The path of the location. + default: "/" + status: + description: + - Whether the location is active or not. + type: bool + default: true + stickysession_id: + type: str + description: + - The stickysession id. + default: ROUTEID + stickysession_status: + description: + - Enable the stickysession. + type: bool + default: false + websocket_passthrough: + description: + - Enable the websocket passthrough. + type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_location utm_proxy_backend: utm_host: sophos.host.name @@ -130,63 +127,63 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - access_control: - description: Whether to use access control state - type: str - allowed_networks: - description: List of allowed network reference names - type: list - auth_profile: - description: The auth profile reference name - type: str - backend: - 
description: The backend reference name - type: str - be_path: - description: The backend path - type: str - comment: - description: The comment string - type: str - denied_networks: - description: The list of the denied network names - type: list - hot_standby: - description: Use hot standby - type: bool - path: - description: Path name - type: str - status: - description: Whether the object is active or not - type: bool - stickysession_id: - description: The identifier of the stickysession - type: str - stickysession_status: - description: Whether to use stickysession or not - type: bool - websocket_passthrough: - description: Whether websocket passthrough will be used or not - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + access_control: + description: Whether to use access control state. + type: str + allowed_networks: + description: List of allowed network reference names. + type: list + auth_profile: + description: The auth profile reference name. + type: str + backend: + description: The backend reference name. + type: str + be_path: + description: The backend path. + type: str + comment: + description: The comment string. + type: str + denied_networks: + description: The list of the denied network names. + type: list + hot_standby: + description: Use hot standby. + type: bool + path: + description: Path name. + type: str + status: + description: Whether the object is active or not. + type: bool + stickysession_id: + description: The identifier of the stickysession. + type: str + stickysession_status: + description: Whether to use stickysession or not. 
+ type: bool + websocket_passthrough: + description: Whether websocket passthrough will be used or not. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_location_info.py b/plugins/modules/utm_proxy_location_info.py index 4e4ba9b139..a7ea37ea79 100644 --- a/plugins/modules/utm_proxy_location_info.py +++ b/plugins/modules/utm_proxy_location_info.py @@ -9,38 +9,36 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_location_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - version_added: 3.3.0 + check_mode: + version_added: 3.3.0 # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Remove UTM proxy_location community.general.utm_proxy_location_info: utm_host: sophos.host.name @@ -48,63 +46,63 @@ EXAMPLES = """ name: TestLocationEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - access_control: - description: Whether to use access control state - type: str - allowed_networks: - description: List of allowed network reference names - type: list - auth_profile: - description: The auth profile reference name - type: str - backend: - description: The backend reference name - type: str - be_path: - description: The backend path - type: str - comment: - description: The comment string - type: str - denied_networks: - description: The list of the denied network names - type: list - hot_standby: - description: Use hot standby - type: bool - path: - description: Path name - type: str - status: - description: Whether the object is active or not - type: bool - stickysession_id: - description: The identifier of the stickysession - type: str - stickysession_status: - description: Whether to use stickysession or not - type: bool - websocket_passthrough: - description: Whether websocket passthrough will be used or not - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. 
+ type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + access_control: + description: Whether to use access control state. + type: str + allowed_networks: + description: List of allowed network reference names. + type: list + auth_profile: + description: The auth profile reference name. + type: str + backend: + description: The backend reference name. + type: str + be_path: + description: The backend path. + type: str + comment: + description: The comment string. + type: str + denied_networks: + description: The list of the denied network names. + type: list + hot_standby: + description: Use hot standby. + type: bool + path: + description: Path name. + type: str + status: + description: Whether the object is active or not. + type: bool + stickysession_id: + description: The identifier of the stickysession. + type: str + stickysession_status: + description: Whether to use stickysession or not. + type: bool + websocket_passthrough: + description: Whether websocket passthrough will be used or not. 
+ type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule From 6e84c1375e23264e835f51610d0e9e2241104051 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 24 Dec 2024 23:59:56 +1300 Subject: [PATCH 397/482] t*: normalize docs (#9339) * t*: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein * break long line to regain sanity --------- Co-authored-by: Felix Fontein --- plugins/modules/taiga_issue.py | 14 +++--- plugins/modules/telegram.py | 28 +++++------ plugins/modules/terraform.py | 88 +++++++++++++++------------------- plugins/modules/timezone.py | 50 +++++++++---------- plugins/modules/twilio.py | 42 ++++++++-------- plugins/modules/typetalk.py | 21 ++++---- 6 files changed, 109 insertions(+), 134 deletions(-) diff --git a/plugins/modules/taiga_issue.py b/plugins/modules/taiga_issue.py index e80ff43b89..b66db29dba 100644 --- a/plugins/modules/taiga_issue.py +++ b/plugins/modules/taiga_issue.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: taiga_issue short_description: Creates/deletes an issue in a Taiga Project Management Platform description: @@ -89,10 +88,11 @@ options: author: Alejandro Guirao (@lekum) requirements: [python-taiga] notes: -- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD -''' + - The authentication is achieved either by the environment variable E(TAIGA_TOKEN) or by the pair + of environment variables E(TAIGA_USERNAME) and E(TAIGA_PASSWORD). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an issue in the my hosted Taiga environment and attach an error log community.general.taiga_issue: taiga_host: https://mytaigahost.example.com @@ -117,9 +117,9 @@ EXAMPLES = ''' subject: An error has been found issue_type: Bug state: absent -''' +""" -RETURN = '''# ''' +RETURN = """# """ import traceback from os import getenv diff --git a/plugins/modules/telegram.py b/plugins/modules/telegram.py index 963c66353f..c2fee153ff 100644 --- a/plugins/modules/telegram.py +++ b/plugins/modules/telegram.py @@ -9,22 +9,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: telegram author: - - "Artem Feofanov (@tyouxa)" - - "Nikolai Lomov (@lomserman)" + - "Artem Feofanov (@tyouxa)" + - "Nikolai Lomov (@lomserman)" short_description: Send notifications via telegram description: - - Send notifications via telegram bot, to a verified group or user. - - Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument. + - Send notifications via telegram bot, to a verified group or user. + - Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument. notes: - - You will require a telegram account and create telegram bot to use this module. + - You will require a telegram account and create telegram bot to use this module. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -49,11 +48,9 @@ options: - Any parameters for the method. - For reference to default method, V(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage). 
version_added: 2.0.0 +""" -''' - -EXAMPLES = """ - +EXAMPLES = r""" - name: Send notify to Telegram community.general.telegram: token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' @@ -75,15 +72,14 @@ EXAMPLES = """ message_id: '{{ saved_msg_id }}' """ -RETURN = """ - +RETURN = r""" msg: - description: The message you attempted to send + description: The message you attempted to send. returned: success type: str sample: "Ansible task finished" telegram_error: - description: Error message gotten from Telegram API + description: Error message gotten from Telegram API. returned: failure type: str sample: "Bad Request: message text is empty" diff --git a/plugins/modules/terraform.py b/plugins/modules/terraform.py index 5906657c66..cf2d07fc71 100644 --- a/plugins/modules/terraform.py +++ b/plugins/modules/terraform.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: terraform short_description: Manages a Terraform deployment (and plans) description: - - Provides support for deploying resources with Terraform and pulling - resource information back into Ansible. + - Provides support for deploying resources with Terraform and pulling resource information back into Ansible. extends_documentation_fragment: - community.general.attributes attributes: @@ -27,18 +25,16 @@ options: state: choices: ['planned', 'present', 'absent'] description: - - Goal state of given stage/project + - Goal state of given stage/project. type: str default: present binary_path: description: - - The path of a terraform binary to use, relative to the 'service_path' - unless you supply an absolute path. + - The path of a C(terraform) binary to use, relative to the 'service_path' unless you supply an absolute path. type: path project_path: description: - - The path to the root of the Terraform directory with the - vars.tf/main.tf/etc to use. 
+ - The path to the root of the Terraform directory with the C(vars.tf)/C(main.tf)/etc to use. type: path required: true plugin_paths: @@ -48,88 +44,80 @@ options: - When set, the plugin discovery and auto-download behavior of Terraform is disabled. - The directory structure in the plugin path can be tricky. The Terraform docs U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) - show a simple directory of files, but actually, the directory structure - has to follow the same structure you would see if Terraform auto-downloaded the plugins. - See the examples below for a tree output of an example plugin directory. + show a simple directory of files, but actually, the directory structure has to follow the same structure you would see if Terraform auto-downloaded + the plugins. See the examples below for a tree output of an example plugin directory. type: list elements: path version_added: 3.0.0 workspace: description: - - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable - that is used to override workspace selection. For more information about workspaces - have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). + - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable that is used to override workspace selection. + For more information about workspaces have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). type: str default: default purge_workspace: description: - - Only works with state = absent + - Only works with state = absent. - If true, the workspace will be deleted after the "terraform destroy" action. - The 'default' workspace will not be deleted. default: false type: bool plan_file: description: - - The path to an existing Terraform plan file to apply. If this is not - specified, Ansible will build a new TF plan and execute it. 
- Note that this option is required if 'state' has the 'planned' value. + - The path to an existing Terraform plan file to apply. If this is not specified, Ansible will build a new TF plan and execute it. Note + that this option is required if 'state' has the 'planned' value. type: path state_file: description: - - The path to an existing Terraform state file to use when building plan. - If this is not specified, the default C(terraform.tfstate) will be used. + - The path to an existing Terraform state file to use when building plan. If this is not specified, the default C(terraform.tfstate) will + be used. - This option is ignored when plan is specified. type: path variables_files: description: - - The path to a variables file for Terraform to fill into the TF - configurations. This can accept a list of paths to multiple variables files. + - The path to a variables file for Terraform to fill into the TF configurations. This can accept a list of paths to multiple variables files. type: list elements: path - aliases: [ 'variables_file' ] + aliases: ['variables_file'] variables: description: - - A group of key-values pairs to override template variables or those in variables files. - By default, only string and number values are allowed, which are passed on unquoted. + - A group of key-values pairs to override template variables or those in variables files. By default, only string and number values are + allowed, which are passed on unquoted. - Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when O(complex_vars=true). - Ansible integers or floats are mapped to terraform numbers. - Ansible strings are mapped to terraform strings. - Ansible dictionaries are mapped to terraform objects. - Ansible lists are mapped to terraform lists. - Ansible booleans are mapped to terraform booleans. - - "B(Note) passwords passed as variables will be visible in the log output. 
Make sure to use C(no_log=true) in production!" + - B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!. type: dict complex_vars: description: - Enable/disable capability to handle complex variable structures for C(terraform). - - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). - Strings that are passed are correctly quoted. + - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). Strings that are passed are correctly + quoted. - When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted. type: bool default: false version_added: 5.7.0 targets: description: - - A list of specific resources to target in this plan/application. The - resources selected here will also auto-include any dependencies. + - A list of specific resources to target in this plan/application. The resources selected here will also auto-include any dependencies. type: list elements: str default: [] lock: description: - - Enable statefile locking, if you use a service that accepts locks (such - as S3+DynamoDB) to store your statefile. + - Enable statefile locking, if you use a service that accepts locks (such as S3+DynamoDB) to store your statefile. type: bool default: true lock_timeout: description: - - How long to maintain the lock on the statefile, if you use a service - that accepts locks (such as S3+DynamoDB). + - How long to maintain the lock on the statefile, if you use a service that accepts locks (such as S3+DynamoDB). type: int force_init: description: - - To avoid duplicating infra, if a state file can't be found this will - force a C(terraform init). Generally, this should be turned off unless + - To avoid duplicating infra, if a state file can't be found this will force a C(terraform init). 
Generally, this should be turned off unless you intend to provision an entirely new Terraform deployment. default: false type: bool @@ -145,8 +133,8 @@ options: type: dict backend_config_files: description: - - The path to a configuration file to provide at init state to the -backend-config parameter. - This can accept a list of paths to multiple configuration files. + - The path to a configuration file to provide at init state to the -backend-config parameter. This can accept a list of paths to multiple + configuration files. type: list elements: path version_added: '0.2.0' @@ -164,8 +152,8 @@ options: version_added: '1.3.0' check_destroy: description: - - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, - but not "destroy and re-create" actions. This option is ignored when O(state=absent). + - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, but not "destroy and re-create" actions. This + option is ignored when O(state=absent). type: bool default: false version_added: '3.3.0' @@ -175,12 +163,12 @@ options: type: int version_added: '3.8.0' notes: - - To just run a C(terraform plan), use check mode. -requirements: [ "terraform" ] + - To just run a C(terraform plan), use check mode. +requirements: ["terraform"] author: "Ryan Scott Brown (@ryansb)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Basic deploy of a service community.general.terraform: project_path: '{{ project_dir }}' @@ -248,7 +236,7 @@ EXAMPLES = """ # └── terraform-provider-vsphere_v1.26.0_x4 """ -RETURN = """ +RETURN = r""" outputs: type: complex description: A dictionary of all the TF outputs by their assigned name. Use RV(ignore:outputs.MyOutputName.value) to access the value. @@ -258,18 +246,18 @@ outputs: sensitive: type: bool returned: always - description: Whether Terraform has marked this value as sensitive + description: Whether Terraform has marked this value as sensitive. 
type: type: str returned: always - description: The type of the value (string, int, etc) + description: The type of the value (string, int, etc). value: type: str returned: always - description: The value of the output as interpolated by Terraform + description: The value of the output as interpolated by Terraform. stdout: type: str - description: Full C(terraform) command stdout, in case you want to display it or examine the event log + description: Full C(terraform) command stdout, in case you want to display it or examine the event log. returned: always sample: '' command: diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py index cd823e6115..78df0cd45f 100644 --- a/plugins/modules/timezone.py +++ b/plugins/modules/timezone.py @@ -8,20 +8,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: timezone short_description: Configure timezone setting description: - - This module configures the timezone setting, both of the system clock and of the hardware clock. - If you want to set up the NTP, use M(ansible.builtin.service) module. + - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use + M(ansible.builtin.service) module. - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. - - Several different tools are used depending on the OS/Distribution involved. - For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). - On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. - On AIX, C(chtz) is used. - - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, - when not using a minimal installation like Alpine Linux). 
+ - Several different tools are used depending on the OS/Distribution involved. For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) + or C(/etc/timezone) and C(hwclock). On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. On + AIX, C(chtz) is used. + - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, when not using + a minimal installation like Alpine Linux). - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. extends_documentation_fragment: - community.general.attributes @@ -35,51 +33,49 @@ options: description: - Name of the timezone for the system clock. - Default is to keep current setting. - - B(At least one of name and hwclock are required.) + - B(At least one) of O(name) and O(hwclock) are required. type: str hwclock: description: - Whether the hardware clock is in UTC or in local timezone. - Default is to keep current setting. - - Note that this option is recommended not to change and may fail - to configure, especially on virtual environments such as AWS. - - B(At least one of name and hwclock are required.) - - I(Only used on Linux.) + - Note that this option is recommended not to change and may fail to configure, especially on virtual environments such as AWS. + - B(At least one) of O(name) and O(hwclock) are required. + - I(Only used on Linux). type: str - aliases: [ rtc ] - choices: [ local, UTC ] + aliases: [rtc] + choices: [local, UTC] notes: - On Ubuntu 24.04 the C(util-linux-extra) package is required to provide the C(hwclock) command. - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone. - - On AIX only Olson/tz database timezones are usable (POSIX is not supported). - An OS reboot is also required on AIX for the new timezone setting to take effect. 
- Note that AIX 6.1+ is needed (OS level 61 or newer). + - On AIX only Olson/tz database timezones are usable (POSIX is not supported). An OS reboot is also required on AIX for the new timezone setting + to take effect. Note that AIX 6.1+ is needed (OS level 61 or newer). author: - Shinichi TAMURA (@tmshn) - Jasper Lievisse Adriaanse (@jasperla) - Indrajit Raychaudhuri (@indrajitr) -''' +""" -RETURN = r''' +RETURN = r""" diff: description: The differences about the given arguments. returned: success type: complex contains: before: - description: The values before change + description: The values before change. type: dict after: - description: The values after change + description: The values after change. type: dict -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set timezone to Asia/Tokyo become: true community.general.timezone: name: Asia/Tokyo -''' +""" import errno import os diff --git a/plugins/modules/twilio.py b/plugins/modules/twilio.py index 270320c465..dc397d4831 100644 --- a/plugins/modules/twilio.py +++ b/plugins/modules/twilio.py @@ -9,20 +9,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: twilio short_description: Sends a text message to a mobile phone through Twilio description: - - Sends a text message to a phone number through the Twilio messaging API. + - Sends a text message to a phone number through the Twilio messaging API. notes: - - This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need a Twilio account with - a purchased or verified phone number to send the text message. + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. 
+ - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased + or verified phone number to send the text message. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -50,7 +47,7 @@ options: description: - One or more phone numbers to send the text message to, format C(+15551112222). required: true - aliases: [ to_number ] + aliases: [to_number] from_number: type: str description: @@ -59,14 +56,13 @@ options: media_url: type: str description: - - A URL with a picture, video or sound clip to send with an MMS - (multimedia message) instead of a plain SMS. + - A URL with a picture, video or sound clip to send with an MMS (multimedia message) instead of a plain SMS. required: false author: "Matt Makai (@makaimc)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # send an SMS about the build status to (555) 303 5681 # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account @@ -75,8 +71,8 @@ EXAMPLES = ''' msg: All servers with webserver role are now configured. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 + from_number: "+15552014545" + to_number: "+15553035681" delegate_to: localhost # send an SMS to multiple phone numbers about the deployment @@ -87,11 +83,11 @@ EXAMPLES = ''' msg: This server configuration is now complete. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15553258899 + from_number: "+15553258899" to_numbers: - - +15551113232 - - +12025551235 - - +19735559010 + - "+15551113232" + - "+12025551235" + - "+19735559010" delegate_to: localhost # send an MMS to a single recipient with an update on the deployment @@ -103,11 +99,11 @@ EXAMPLES = ''' msg: Deployment complete! 
account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 + from_number: "+15552014545" + to_number: "+15553035681" media_url: https://demo.twilio.com/logo.png delegate_to: localhost -''' +""" # ======================================= # twilio module support methods diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py index ddf9f35605..6364cdc45b 100644 --- a/plugins/modules/typetalk.py +++ b/plugins/modules/typetalk.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: typetalk short_description: Send a message to typetalk description: - - Send a message to typetalk using typetalk API + - Send a message to typetalk using typetalk API. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,35 +25,35 @@ options: client_id: type: str description: - - OAuth2 client ID + - OAuth2 client ID. required: true client_secret: type: str description: - - OAuth2 client secret + - OAuth2 client secret. required: true topic: type: int description: - - topic id to post message + - Topic id to post message. required: true msg: type: str description: - - message body + - Message body. 
required: true -requirements: [ json ] +requirements: [json] author: "Takashi Someda (@tksmd)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to typetalk community.general.typetalk: client_id: 12345 client_secret: 12345 topic: 1 msg: install completed -''' +""" import json From c3ed2144e2585ee7c91761f666767154e299c734 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 00:00:10 +1300 Subject: [PATCH 398/482] callback plugins: use f-strings (#9321) * callback plugins: use f-strings * add changelog frag * manual change for few occurrences * manual change for few occurrences * adjustment from review * adjustment from review * adjustment from review * Update plugins/callback/splunk.py Co-authored-by: Felix Fontein * replace str templating with JSON templating --------- Co-authored-by: Felix Fontein --- .../fragments/9321-fstr-callback-plugins.yml | 22 +++++ plugins/callback/cgroup_memory_recap.py | 4 +- plugins/callback/context_demo.py | 6 +- plugins/callback/counter_enabled.py | 86 +++++++++---------- plugins/callback/dense.py | 54 ++++++------ plugins/callback/diy.py | 6 +- plugins/callback/elastic.py | 10 +-- plugins/callback/jabber.py | 6 +- plugins/callback/log_plays.py | 19 ++-- plugins/callback/loganalytics.py | 7 +- plugins/callback/logdna.py | 4 +- plugins/callback/logentries.py | 12 +-- plugins/callback/mail.py | 52 +++++------ plugins/callback/nrdp.py | 19 ++-- plugins/callback/opentelemetry.py | 15 ++-- plugins/callback/say.py | 16 ++-- plugins/callback/selective.py | 42 ++++----- plugins/callback/slack.py | 31 ++++--- plugins/callback/splunk.py | 5 +- plugins/callback/timestamp.py | 4 +- plugins/callback/unixy.py | 80 ++++++++--------- plugins/callback/yaml.py | 4 +- 22 files changed, 251 insertions(+), 253 deletions(-) create mode 100644 changelogs/fragments/9321-fstr-callback-plugins.yml diff --git a/changelogs/fragments/9321-fstr-callback-plugins.yml 
b/changelogs/fragments/9321-fstr-callback-plugins.yml new file mode 100644 index 0000000000..d79d3cbfa0 --- /dev/null +++ b/changelogs/fragments/9321-fstr-callback-plugins.yml @@ -0,0 +1,22 @@ +minor_changes: + - cgroup_memory_recap callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - context_demo callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - counter_enabled callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - dense callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - diy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - elastic callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - jabber callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - log_plays callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - loganalytics callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - logdna callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - logentries callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). 
+ - mail callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - nrdp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - opentelemetry callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - say callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - selective callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - slack callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - splunk callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - timestamp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - unixy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). + - yaml callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). 
diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index 643f0f0b88..e8599aad49 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -114,7 +114,7 @@ class CallbackModule(CallbackBase): max_results = int(f.read().strip()) / 1024 / 1024 self._display.banner('CGROUP MEMORY RECAP') - self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results) + self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n') for task, memory in self.task_results: - self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory)) + self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB') diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index b9558fc064..94dfdf45ad 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -38,15 +38,15 @@ class CallbackModule(CallbackBase): self.play = None def v2_on_any(self, *args, **kwargs): - self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task)) + self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---") self._display.display(" --- ARGS ") for i, a in enumerate(args): - self._display.display(' %s: %s' % (i, a)) + self._display.display(f' {i}: {a}') self._display.display(" --- KWARGS ") for k in kwargs: - self._display.display(' %s: %s' % (k, kwargs[k])) + self._display.display(f' {k}: {kwargs[k]}') def v2_playbook_on_play_start(self, play): self.play = play diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 27adc97a6c..9bb1df07d4 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -71,7 +71,7 @@ class CallbackModule(CallbackBase): if not name: msg = u"play" else: - msg = u"PLAY [%s]" % name + msg = f"PLAY [{name}]" self._play = play @@ -91,25 +91,17 @@ class 
CallbackModule(CallbackBase): for host in hosts: stat = stats.summarize(host) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat), - colorize(u'ok', stat['ok'], C.COLOR_OK), - colorize(u'changed', stat['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', stat['failures'], C.COLOR_ERROR), - colorize(u'rescued', stat['rescued'], C.COLOR_OK), - colorize(u'ignored', stat['ignored'], C.COLOR_WARN)), + self._display.display( + f"{hostcolor(host, stat)} : {colorize(u'ok', stat['ok'], C.COLOR_OK)} {colorize(u'changed', stat['changed'], C.COLOR_CHANGED)} " + f"{colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize(u'failed', stat['failures'], C.COLOR_ERROR)} " + f"{colorize(u'rescued', stat['rescued'], C.COLOR_OK)} {colorize(u'ignored', stat['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat, False), - colorize(u'ok', stat['ok'], None), - colorize(u'changed', stat['changed'], None), - colorize(u'unreachable', stat['unreachable'], None), - colorize(u'failed', stat['failures'], None), - colorize(u'rescued', stat['rescued'], None), - colorize(u'ignored', stat['ignored'], None)), + self._display.display( + f"{hostcolor(host, stat, False)} : {colorize(u'ok', stat['ok'], None)} {colorize(u'changed', stat['changed'], None)} " + f"{colorize(u'unreachable', stat['unreachable'], None)} {colorize(u'failed', stat['failures'], None)} " + f"{colorize(u'rescued', stat['rescued'], None)} {colorize(u'ignored', stat['ignored'], None)}", log_only=True ) @@ -124,12 +116,14 @@ class CallbackModule(CallbackBase): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: 
{_custom_stats}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {_custom_stats_run}') self._display.display("", screen_only=True) def v2_playbook_on_task_start(self, task, is_conditional): @@ -143,13 +137,13 @@ class CallbackModule(CallbackBase): # that they can secure this if they feel that their stdout is insecure # (shoulder surfing, logging stdout straight to a file, etc). if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: - args = ', '.join(('%s=%s' % a for a in task.args.items())) - args = ' %s' % args - self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args)) + args = ', '.join(('{k}={v}' for k, v in task.args.items())) + args = f' {args}' + self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]") if self._display.verbosity >= 2: path = task.get_path() if path: - self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) + self._display.display(f"task path: {path}", color=C.COLOR_DEBUG) self._host_counter = self._previous_batch_total self._task_counter += 1 @@ -166,15 +160,15 @@ class CallbackModule(CallbackBase): return elif result._result.get('changed', False): if delegated_vars: - msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host']) + msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]" else: - msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" color = C.COLOR_CHANGED else: if delegated_vars: 
- msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host']) + msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]" else: - msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" color = C.COLOR_OK self._handle_warnings(result._result) @@ -185,7 +179,7 @@ class CallbackModule(CallbackBase): self._clean_results(result._result, result._task.action) if self._run_is_verbose(result): - msg += " => %s" % (self._dump_results(result._result),) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=color) def v2_runner_on_failed(self, result, ignore_errors=False): @@ -206,14 +200,16 @@ class CallbackModule(CallbackBase): else: if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) else: - self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! 
=> {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) if ignore_errors: self._display.display("...ignoring", color=C.COLOR_SKIP) @@ -231,9 +227,9 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) else: - msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" if self._run_is_verbose(result): - msg += " => %s" % self._dump_results(result._result) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): @@ -244,11 +240,13 @@ class CallbackModule(CallbackBase): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) else: - self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! 
=> {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 490705fd27..e0419644ef 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -195,7 +195,7 @@ class CallbackModule(CallbackModule_default): self.disabled = True def __del__(self): - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") def _add_host(self, result, status): name = result._host.get_name() @@ -252,7 +252,7 @@ class CallbackModule(CallbackModule_default): def _display_progress(self, result=None): # Always rewrite the complete line sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) - sys.stdout.write('%s %d:' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}:') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -260,7 +260,7 @@ class CallbackModule(CallbackModule_default): for name in self.hosts: sys.stdout.write(' ') if self.hosts[name].get('delegate', None): - sys.stdout.write(self.hosts[name]['delegate'] + '>') + sys.stdout.write(f"{self.hosts[name]['delegate']}>") sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) sys.stdout.flush() @@ -274,8 +274,8 @@ class CallbackModule(CallbackModule_default): if not self.shown_title: self.shown_title = True sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) - sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}') + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() else: sys.stdout.write(vt100.restore + vt100.reset + 
vt100.clearline) @@ -284,7 +284,7 @@ class CallbackModule(CallbackModule_default): def _display_results(self, result, status): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False @@ -309,15 +309,15 @@ class CallbackModule(CallbackModule_default): if result._task.loop and 'results' in result._result: self._process_items(result) else: - sys.stdout.write(colors[status] + status + ': ') + sys.stdout.write(f"{colors[status] + status}: ") delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host']) + sys.stdout.write(f"{vt100.reset + result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") else: sys.stdout.write(result._host.get_name()) - sys.stdout.write(': ' + dump + '\n') + sys.stdout.write(f": {dump}\n") sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) sys.stdout.flush() @@ -327,7 +327,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_play_start(self, play): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold) @@ -341,14 +341,14 @@ class CallbackModule(CallbackModule_default): name = play.get_name().strip() if not name: name = 'unnamed' - sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper())) - sys.stdout.write(vt100.restore + 
vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_task_start(self, task, is_conditional): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: # Do not clear line, since we want to retain the previous output sys.stdout.write(vt100.restore + vt100.reset + vt100.underline) @@ -365,14 +365,14 @@ class CallbackModule(CallbackModule_default): self.count['task'] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() def v2_playbook_on_handler_task_start(self, task): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) @@ -388,7 +388,7 @@ class CallbackModule(CallbackModule_default): self.count[self.type] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' 
% (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -451,13 +451,13 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_no_hosts_remaining(self): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False - sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_include(self, included_file): @@ -465,7 +465,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_stats(self, stats): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -476,22 +476,16 @@ class CallbackModule(CallbackModule_default): sys.stdout.write(vt100.bold + vt100.underline) sys.stdout.write('SUMMARY') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() hosts = sorted(stats.processed.keys()) for h in hosts: t = stats.summarize(h) self._display.display( - u"%s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', 
t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN), - ), + f"{hostcolor(h, t)} : {colorize(u'ok', t['ok'], C.COLOR_OK)} {colorize(u'changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize(u'failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize(u'rescued', t['rescued'], C.COLOR_OK)} {colorize(u'ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index cf9369e4b4..e3cda4ac7c 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -828,9 +828,9 @@ class CallbackModule(Default): _callback_options = ['msg', 'msg_color'] for option in _callback_options: - _option_name = '%s_%s' % (_callback_type, option) + _option_name = f'{_callback_type}_{option}' _option_template = variables.get( - self.DIY_NS + "_" + _option_name, + f"{self.DIY_NS}_{_option_name}", self.get_option(_option_name) ) _ret.update({option: self._template( @@ -867,7 +867,7 @@ class CallbackModule(Default): handler=None, result=None, stats=None, remove_attr_ref_loop=True): def _get_value(obj, attr=None, method=None): if attr: - return getattr(obj, attr, getattr(obj, "_" + attr, None)) + return getattr(obj, attr, getattr(obj, f"_{attr}", None)) if method: _method = getattr(obj, method) diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 0c94d1ba33..9b5942d962 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -118,7 +118,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -166,7 +166,7 @@ class ElasticSource(object): args = None if not task.no_log and not hide_task_arguments: - args = 
', '.join(('%s=%s' % a for a in task.args.items())) + args = ', '.join((f'{k}={v}' for k, v in task.args.items())) tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) @@ -225,7 +225,7 @@ class ElasticSource(object): def create_span_data(self, apm_cli, task_data, host_data): """ create the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = "success" status = "success" @@ -259,7 +259,7 @@ class ElasticSource(object): "ansible.task.host.status": host_data.status}) as span: span.outcome = status if 'failure' in status: - exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message)) + exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}") apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True) def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key): @@ -288,7 +288,7 @@ class ElasticSource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" class CallbackModule(CallbackBase): diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index 302687b708..f63f07b4f5 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -102,7 +102,7 @@ class CallbackModule(CallbackBase): """Display Playbook and play start messages""" self.play = play name = play.name - self.send_msg("Ansible starting play: %s" % (name)) + self.send_msg(f"Ansible starting play: {name}") def playbook_on_stats(self, stats): name = 
self.play @@ -118,7 +118,7 @@ class CallbackModule(CallbackBase): if failures or unreachable: out = self.debug - self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) + self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}") else: out = self.debug - self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) + self.send_msg(f"Great! \n Playbook {name} completed:\n{s} \n Last task debug:\n {out}") diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index daa88bcc11..190e1c60c4 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -57,7 +57,10 @@ class CallbackModule(CallbackBase): CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" - MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n" + + @staticmethod + def _make_msg(now, playbook, task_name, task_action, category, data): + return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n" def __init__(self): @@ -82,22 +85,12 @@ class CallbackModule(CallbackBase): invocation = data.pop('invocation', None) data = json.dumps(data, cls=AnsibleJSONEncoder) if invocation is not None: - data = json.dumps(invocation) + " => %s " % data + data = f"{json.dumps(invocation)} => {data} " path = os.path.join(self.log_folder, result._host.get_name()) now = time.strftime(self.TIME_FORMAT, time.localtime()) - msg = to_bytes( - self.MSG_FORMAT - % dict( - now=now, - playbook=self.playbook, - task_name=result._task.name, - task_action=result._task.action, - category=category, - data=data, - ) - ) + msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data)) with open(path, "ab") as fd: fd.write(msg) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index fd1b2772c4..98ceb1e7a3 100644 --- 
a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -84,18 +84,17 @@ class AzureLogAnalyticsSource(object): def __build_signature(self, date, workspace_id, shared_key, content_length): # Build authorisation signature for Azure log analytics API call - sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format( - str(content_length), date) + sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs" utf8_sigs = sigs.encode('utf-8') decoded_shared_key = base64.b64decode(shared_key) hmac_sha256_sigs = hmac.new( decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') - signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash) + signature = f"SharedKey {workspace_id}:{encoded_hash}" return signature def __build_workspace_url(self, workspace_id): - return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) + return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" def __rfc1123date(self): return now().strftime('%a, %d %b %Y %H:%M:%S GMT') diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index fc9a81ac8a..cbf9df4b1c 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -73,7 +73,7 @@ except ImportError: # Getting MAC Address of system: def get_mac(): - mac = "%012x" % getnode() + mac = f"{getnode():012x}" return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) @@ -161,7 +161,7 @@ class CallbackModule(CallbackBase): if ninvalidKeys > 0: for key in invalidKeys: del meta[key] - meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) + meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}" return meta def sanitizeJSON(self, data): diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index c1271543ad..bc8b1cb5bd 100644 --- 
a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -153,7 +153,7 @@ class PlainTextSocketAppender(object): self.open_connection() return except Exception as e: - self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) + self._display.vvvv(f"Unable to connect to Logentries: {to_text(e)}") root_delay *= 2 if root_delay > self.MAX_DELAY: @@ -162,7 +162,7 @@ class PlainTextSocketAppender(object): wait_for = root_delay + random.uniform(0, root_delay) try: - self._display.vvvv("sleeping %s before retry" % wait_for) + self._display.vvvv(f"sleeping {wait_for} before retry") time.sleep(wait_for) except KeyboardInterrupt: raise @@ -249,7 +249,7 @@ class CallbackModule(CallbackBase): self.use_tls = self.get_option('use_tls') self.flatten = self.get_option('flatten') except KeyError as e: - self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) + self._display.warning(f"Missing option for Logentries callback plugin: {to_text(e)}") self.disabled = True try: @@ -268,10 +268,10 @@ class CallbackModule(CallbackBase): if not self.disabled: if self.use_tls: - self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS") self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) else: - self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}") self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) self._appender.reopen_connection() @@ -284,7 +284,7 @@ class CallbackModule(CallbackBase): def emit(self, record): msg = record.rstrip('\n') - msg = "{0} {1}".format(self.token, msg) + msg = f"{self.token} {msg}" self._appender.put(msg) self._display.vvvv("Sent event to logentries") diff --git a/plugins/callback/mail.py 
b/plugins/callback/mail.py index 1b847ea34c..2e26fe84eb 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -135,14 +135,14 @@ class CallbackModule(CallbackBase): if self.bcc: bcc_addresses = email.utils.getaddresses(self.bcc) - content = 'Date: %s\n' % email.utils.formatdate() - content += 'From: %s\n' % email.utils.formataddr(sender_address) + content = f'Date: {email.utils.formatdate()}\n' + content += f'From: {email.utils.formataddr(sender_address)}\n' if self.to: - content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses]) + content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n" if self.cc: - content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses]) - content += 'Message-ID: %s\n' % email.utils.make_msgid(domain=self.get_option('message_id_domain')) - content += 'Subject: %s\n\n' % subject.strip() + content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n" + content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n" + content += f'Subject: {subject.strip()}\n\n' content += body addresses = to_addresses @@ -159,23 +159,22 @@ class CallbackModule(CallbackBase): smtp.quit() def subject_msg(self, multiline, failtype, linenr): - return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr]) + msg = multiline.strip('\r\n').splitlines()[linenr] + return f'{failtype}: {msg}' def indent(self, multiline, indent=8): return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE) def body_blob(self, multiline, texttype): ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = 'with the following %s:\n\n' % texttype - blob = '' - for line in multiline.strip('\r\n').splitlines(): - blob += '%s\n' % line - return intro + self.indent(blob) + '\n' + intro = f'with the following {texttype}:\n\n' + blob = 
"\n".join(multiline.strip('\r\n').splitlines()) + return f"{intro}{self.indent(blob)}\n" def mail_result(self, result, failtype): host = result._host.get_name() if not self.sender: - self.sender = '"Ansible: %s" ' % host + self.sender = f'"Ansible: {host}" ' # Add subject if self.itembody: @@ -191,31 +190,32 @@ class CallbackModule(CallbackBase): elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ subject = self.subject_msg(result._result['exception'], failtype, -1) else: - subject = '%s: %s' % (failtype, result._task.name or result._task.action) + subject = f'{failtype}: {result._task.name or result._task.action}' # Make playbook name visible (e.g. in Outlook/Gmail condensed view) - body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name) + body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n' if result._task.name: - body += 'Task: %s\n' % result._task.name - body += 'Module: %s\n' % result._task.action - body += 'Host: %s\n' % host + body += f'Task: {result._task.name}\n' + body += f'Module: {result._task.action}\n' + body += f'Host: {host}\n' body += '\n' # Add task information (as much as possible) body += 'The following task failed:\n\n' if 'invocation' in result._result: - body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4))) + body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n") elif result._task.name: - body += self.indent('%s (%s)\n' % (result._task.name, result._task.action)) + body += self.indent(f'{result._task.name} ({result._task.action})\n') else: - body += self.indent('%s\n' % result._task.action) + body += self.indent(f'{result._task.action}\n') body += '\n' # Add item / message if self.itembody: body += self.itembody elif result._result.get('failed_when_result') is True: - body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- 
'.join(result._task.failed_when)) + '\n\n' + fail_cond = self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + body += f"due to the following condition:\n\n{fail_cond}\n\n" elif result._result.get('msg'): body += self.body_blob(result._result['msg'], 'message') @@ -228,13 +228,13 @@ class CallbackModule(CallbackBase): body += self.body_blob(result._result['exception'], 'exception') if result._result.get('warnings'): for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}') if result._result.get('deprecations'): for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}') body += 'and a complete dump of the error:\n\n' - body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4))) + body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}') self.mail(subject=subject, body=body) @@ -257,4 +257,4 @@ class CallbackModule(CallbackBase): def v2_runner_item_on_failed(self, result): # Pass item information to task failure self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result) + self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'") diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 62f4a89ec8..b937049dc3 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -132,10 +132,10 @@ class CallbackModule(CallbackBase): xmldata = "\n" xmldata += "\n" xmldata += "\n" - xmldata += "%s\n" % self.hostname - xmldata += 
"%s\n" % self.servicename - xmldata += "%d\n" % state - xmldata += "%s\n" % msg + xmldata += f"{self.hostname}\n" + xmldata += f"{self.servicename}\n" + xmldata += f"{state}\n" + xmldata += f"{msg}\n" xmldata += "\n" xmldata += "\n" @@ -152,7 +152,7 @@ class CallbackModule(CallbackBase): validate_certs=self.validate_nrdp_certs) return response.read() except Exception as ex: - self._display.warning("NRDP callback cannot send result {0}".format(ex)) + self._display.warning(f"NRDP callback cannot send result {ex}") def v2_playbook_on_play_start(self, play): ''' @@ -170,17 +170,16 @@ class CallbackModule(CallbackBase): critical = warning = 0 for host in hosts: stat = stats.summarize(host) - gstats += "'%s_ok'=%d '%s_changed'=%d \ - '%s_unreachable'=%d '%s_failed'=%d " % \ - (host, stat['ok'], host, stat['changed'], - host, stat['unreachable'], host, stat['failures']) + gstats += ( + f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " + ) # Critical when failed tasks or unreachable host critical += stat['failures'] critical += stat['unreachable'] # Warning when changed tasks warning += stat['changed'] - msg = "%s | %s" % (name, gstats) + msg = f"{name} | {gstats}" if critical: # Send Critical self._send_nrdp(self.CRITICAL, msg) diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 8dc627c214..44c563019c 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -209,7 +209,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -347,7 +347,7 @@ class OpenTelemetrySource(object): def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): """ 
update the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = 'success' res = {} @@ -470,7 +470,7 @@ class OpenTelemetrySource(object): def get_error_message_from_results(results, action): for result in results: if result.get('failed', False): - return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result)) + return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}" @staticmethod def _last_line(text): @@ -482,14 +482,14 @@ class OpenTelemetrySource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" @staticmethod def enrich_error_message_from_results(results, action): message = "" for result in results: if result.get('failed', False): - message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message) + message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}" return message @@ -535,8 +535,9 @@ class CallbackModule(CallbackBase): environment_variable = self.get_option('enable_from_environment') if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true': self.disabled = True - self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. " - "Disabling the `opentelemetry` callback plugin.".format(environment_variable)) + self._display.warning( + f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin." 
+ ) self.hide_task_arguments = self.get_option('hide_task_arguments') diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 9d96ad74d9..357f84ae08 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -50,7 +50,7 @@ class CallbackModule(CallbackBase): self.synthesizer = get_bin_path('say') if platform.system() != 'Darwin': # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) + self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter") else: self.FAILED_VOICE = 'Zarvox' self.REGULAR_VOICE = 'Trinoids' @@ -69,7 +69,7 @@ class CallbackModule(CallbackBase): # ansible will not call any callback if disabled is set to True if not self.synthesizer: self.disabled = True - self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) + self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled") def say(self, msg, voice): cmd = [self.synthesizer, msg] @@ -78,7 +78,7 @@ class CallbackModule(CallbackBase): subprocess.call(cmd) def runner_on_failed(self, host, res, ignore_errors=False): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_ok(self, host, res): self.say("pew", self.LASER_VOICE) @@ -87,13 +87,13 @@ class CallbackModule(CallbackBase): self.say("pew", self.LASER_VOICE) def runner_on_unreachable(self, host, res): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): self.say("pew", self.LASER_VOICE) def runner_on_async_failed(self, host, res, jid): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on 
host {host}", self.FAILED_VOICE) def playbook_on_start(self): self.say("Running Playbook", self.REGULAR_VOICE) @@ -103,15 +103,15 @@ class CallbackModule(CallbackBase): def playbook_on_task_start(self, name, is_conditional): if not is_conditional: - self.say("Starting task: %s" % name, self.REGULAR_VOICE) + self.say(f"Starting task: {name}", self.REGULAR_VOICE) else: - self.say("Notifying task: %s" % name, self.REGULAR_VOICE) + self.say(f"Notifying task: {name}", self.REGULAR_VOICE) def playbook_on_setup(self): self.say("Gathering facts", self.REGULAR_VOICE) def playbook_on_play_start(self, name): - self.say("Starting play: %s" % name, self.HAPPY_VOICE) + self.say(f"Starting play: {name}", self.HAPPY_VOICE) def playbook_on_stats(self, stats): self.say("Play complete", self.HAPPY_VOICE) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 0696757837..3cea24ff1a 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -48,13 +48,13 @@ from ansible.module_utils.common.text.converters import to_text DONT_COLORIZE = False COLORS = { 'normal': '\033[0m', - 'ok': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_OK]), + 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m', 'bold': '\033[1m', 'not_so_bold': '\033[1m\033[34m', - 'changed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_CHANGED]), - 'failed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_ERROR]), + 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m', + 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m', 'endc': '\033[0m', - 'skipped': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_SKIP]), + 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m', } @@ -73,7 +73,7 @@ def colorize(msg, color): if DONT_COLORIZE: return msg else: - return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc']) + return f"{COLORS[color]}{msg}{COLORS['endc']}" class CallbackModule(CallbackBase): @@ -106,15 +106,15 @@ class CallbackModule(CallbackBase): line_length = 120 if self.last_skipped: print() - line = "# {0} 
".format(task_name) - msg = colorize("{0}{1}".format(line, '*' * (line_length - len(line))), 'bold') + line = f"# {task_name} " + msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold') print(msg) def _indent_text(self, text, indent_level): lines = text.splitlines() result_lines = [] for l in lines: - result_lines.append("{0}{1}".format(' ' * indent_level, l)) + result_lines.append(f"{' ' * indent_level}{l}") return '\n'.join(result_lines) def _print_diff(self, diff, indent_level): @@ -147,19 +147,19 @@ class CallbackModule(CallbackBase): change_string = colorize('FAILED!!!', color) else: color = 'changed' if changed else 'ok' - change_string = colorize("changed={0}".format(changed), color) + change_string = colorize(f"changed={changed}", color) msg = colorize(msg, color) line_length = 120 spaces = ' ' * (40 - len(name) - indent_level) - line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string) + line = f"{' ' * indent_level} * {name}{spaces}- {change_string}" if len(msg) < 50: - line += ' -- {0}'.format(msg) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {msg}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(msg, indent_level + 4)) if diff: @@ -239,8 +239,10 @@ class CallbackModule(CallbackBase): else: color = 'ok' - msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format( - host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored']) + msg = ( + f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable=" + f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}" + ) print(colorize(msg, color)) def v2_runner_on_skipped(self, result, **kwargs): @@ -252,17 +254,15 @@ class CallbackModule(CallbackBase): line_length = 120 
spaces = ' ' * (31 - len(result._host.name) - 4) - line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'), - spaces, - colorize("skipped", 'skipped'),) + line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}" reason = result._result.get('skipped_reason', '') or \ result._result.get('skip_reason', '') if len(reason) < 50: - line += ' -- {0}'.format(reason) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {reason}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(reason, 8)) print(reason) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 2a995992ee..0e58628c35 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -138,14 +138,13 @@ class CallbackModule(CallbackBase): headers=headers) return response.read() except Exception as e: - self._display.warning(u'Could not submit message to Slack: %s' % - to_text(e)) + self._display.warning(f'Could not submit message to Slack: {to_text(e)}') def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) title = [ - '*Playbook initiated* (_%s_)' % self.guid + f'*Playbook initiated* (_{self.guid}_)' ] invocation_items = [] @@ -156,23 +155,23 @@ class CallbackModule(CallbackBase): subset = context.CLIARGS['subset'] inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] - invocation_items.append('Inventory: %s' % ', '.join(inventory)) + invocation_items.append(f"Inventory: {', '.join(inventory)}") if tags and tags != ['all']: - invocation_items.append('Tags: %s' % ', '.join(tags)) + invocation_items.append(f"Tags: {', '.join(tags)}") if skip_tags: - invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) + invocation_items.append(f"Skip Tags: {', 
'.join(skip_tags)}") if subset: - invocation_items.append('Limit: %s' % subset) + invocation_items.append(f'Limit: {subset}') if extra_vars: - invocation_items.append('Extra Vars: %s' % - ' '.join(extra_vars)) + invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}") - title.append('by *%s*' % context.CLIARGS['remote_user']) + title.append(f"by *{context.CLIARGS['remote_user']}*") - title.append('\n\n*%s*' % self.playbook_name) + title.append(f'\n\n*{self.playbook_name}*') msg_items = [' '.join(title)] if invocation_items: - msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + _inv_item = '\n'.join(invocation_items) + msg_items.append(f'```\n{_inv_item}\n```') msg = '\n'.join(msg_items) @@ -192,8 +191,8 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): """Display Play start messages""" - name = play.name or 'Play name not specified (%s)' % play._uuid - msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + name = play.name or f'Play name not specified ({play._uuid})' + msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*' attachments = [ { 'fallback': msg, @@ -228,7 +227,7 @@ class CallbackModule(CallbackBase): attachments = [] msg_items = [ - '*Playbook Complete* (_%s_)' % self.guid + f'*Playbook Complete* (_{self.guid}_)' ] if failures or unreachable: color = 'danger' @@ -237,7 +236,7 @@ class CallbackModule(CallbackBase): color = 'good' msg_items.append('\n*Success!*') - msg_items.append('```\n%s\n```' % t) + msg_items.append(f'```\n{t}\n```') msg = '\n'.join(msg_items) diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index b2ce48de25..966170594c 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -153,15 +153,14 @@ class SplunkHTTPCollectorSource(object): data['ansible_result'] = result._result # This wraps the json payload in and outer json event needed by Splunk - jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) - jsondata = '{"event":' + 
jsondata + "}" + jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) open_url( url, jsondata, headers={ 'Content-type': 'application/json', - 'Authorization': 'Splunk ' + authtoken + 'Authorization': f"Splunk {authtoken}" }, method='POST', validate_certs=validate_certs diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py index 07cd8d239c..d8737b9e4c 100644 --- a/plugins/callback/timestamp.py +++ b/plugins/callback/timestamp.py @@ -85,7 +85,7 @@ def banner(self, msg, color=None, cows=True): msg = to_text(msg) if self.b_cowsay and cows: try: - self.banner_cowsay("%s @ %s" % (msg, timestamp)) + self.banner_cowsay(f"{msg} @ {timestamp}") return except OSError: self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.") @@ -98,7 +98,7 @@ def banner(self, msg, color=None, cows=True): if star_len <= 3: star_len = 3 stars = "*" * star_len - self.display("\n%s %s %s" % (msg, stars, timestamp), color=color) + self.display(f"\n{msg} {stars} {timestamp}", color=color) class CallbackModule(Default): diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 4908202c23..de0c79088b 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -67,24 +67,24 @@ class CallbackModule(CallbackModule_default): def _process_result_output(self, result, msg): task_host = result._host.get_name() - task_result = "%s %s" % (task_host, msg) + task_result = f"{task_host} {msg}" if self._run_is_verbose(result): - task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4)) + task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}" return task_result if self.delegated_vars: task_delegate_host = self.delegated_vars['ansible_host'] - task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg) + task_result = f"{task_host} -> {task_delegate_host} {msg}" if result._result.get('msg') and result._result.get('msg') != "All items completed": 
- task_result += " | msg: " + to_text(result._result.get('msg')) + task_result += f" | msg: {to_text(result._result.get('msg'))}" if result._result.get('stdout'): - task_result += " | stdout: " + result._result.get('stdout') + task_result += f" | stdout: {result._result.get('stdout')}" if result._result.get('stderr'): - task_result += " | stderr: " + result._result.get('stderr') + task_result += f" | stderr: {result._result.get('stderr')}" return task_result @@ -92,28 +92,28 @@ class CallbackModule(CallbackModule_default): self._get_task_display_name(task) if self.task_display_name is not None: if task.check_mode and self.get_option('check_mode_markers'): - self._display.display("%s (check mode)..." % self.task_display_name) + self._display.display(f"{self.task_display_name} (check mode)...") else: - self._display.display("%s..." % self.task_display_name) + self._display.display(f"{self.task_display_name}...") def v2_playbook_on_handler_task_start(self, task): self._get_task_display_name(task) if self.task_display_name is not None: if task.check_mode and self.get_option('check_mode_markers'): - self._display.display("%s (via handler in check mode)... " % self.task_display_name) + self._display.display(f"{self.task_display_name} (via handler in check mode)... ") else: - self._display.display("%s (via handler)... " % self.task_display_name) + self._display.display(f"{self.task_display_name} (via handler)... 
") def v2_playbook_on_play_start(self, play): name = play.get_name().strip() if play.check_mode and self.get_option('check_mode_markers'): if name and play.hosts: - msg = u"\n- %s (in check mode) on hosts: %s -" % (name, ",".join(play.hosts)) + msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -" else: msg = u"- check mode -" else: if name and play.hosts: - msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts)) + msg = f"\n- {name} on hosts: {','.join(play.hosts)} -" else: msg = u"---" @@ -126,7 +126,7 @@ class CallbackModule(CallbackModule_default): msg = "skipped" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) else: return @@ -136,10 +136,10 @@ class CallbackModule(CallbackModule_default): msg = "failed" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): self._preprocess_result(result) @@ -149,13 +149,13 @@ class CallbackModule(CallbackModule_default): msg = "done" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" display_color = C.COLOR_CHANGED task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) elif self.get_option('display_ok_hosts'): task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", 
display_color) def v2_runner_item_on_skipped(self, result): self.v2_runner_on_skipped(result) @@ -173,7 +173,7 @@ class CallbackModule(CallbackModule_default): display_color = C.COLOR_UNREACHABLE task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: @@ -195,25 +195,17 @@ class CallbackModule(CallbackModule_default): # TODO how else can we display these? t = stats.summarize(h) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN)), + self._display.display( + f" {hostcolor(h, t)} : {colorize(u'ok', t['ok'], C.COLOR_OK)} {colorize(u'changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize(u'failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize(u'rescued', t['rescued'], C.COLOR_OK)} {colorize(u'ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t, False), - colorize(u'ok', t['ok'], None), - colorize(u'changed', t['changed'], None), - colorize(u'unreachable', t['unreachable'], None), - colorize(u'failed', t['failures'], None), - colorize(u'rescued', t['rescued'], None), - colorize(u'ignored', t['ignored'], None)), + self._display.display( + f" {hostcolor(h, t, False)} : {colorize(u'ok', t['ok'], None)} {colorize(u'changed', t['changed'], None)} " + f"{colorize(u'unreachable', t['unreachable'], 
None)} {colorize(u'failed', t['failures'], None)} {colorize(u'rescued', t['rescued'], None)} " + f"{colorize(u'ignored', t['ignored'], None)}", log_only=True ) if stats.custom and self.get_option('show_custom_stats'): @@ -223,12 +215,14 @@ class CallbackModule(CallbackModule_default): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {stat_val}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {stat_val_run}') self._display.display("", screen_only=True) def v2_playbook_on_no_hosts_matched(self): @@ -239,23 +233,23 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_start(self, playbook): if context.CLIARGS['check'] and self.get_option('check_mode_markers'): - self._display.display("Executing playbook %s in check mode" % basename(playbook._file_name)) + self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode") else: - self._display.display("Executing playbook %s" % basename(playbook._file_name)) + self._display.display(f"Executing playbook {basename(playbook._file_name)}") # show CLI arguments if self._display.verbosity > 3: if context.CLIARGS.get('args'): - self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']), + self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}", color=C.COLOR_VERBOSE, screen_only=True) for argument in (a for a in context.CLIARGS if a != 'args'): val = context.CLIARGS[argument] if val: - self._display.vvvv('%s: %s' % (argument, val)) + 
self._display.vvvv(f'{argument}: {val}') def v2_runner_retry(self, result): - msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries']) + msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})" if self._run_is_verbose(result): - msg += "Result was: %s" % self._dump_results(result._result) + msg += f"Result was: {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index e41f69ec53..1daf4572d5 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -113,11 +113,11 @@ class CallbackModule(Default): # put changed and skipped into a header line if 'changed' in abridged_result: - dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' + dumped += f"changed={str(abridged_result['changed']).lower()} " del abridged_result['changed'] if 'skipped' in abridged_result: - dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' + dumped += f"skipped={str(abridged_result['skipped']).lower()} " del abridged_result['skipped'] # if we already have stdout, we don't need stdout_lines From d539b00d4c415235ff20ea4d575a47f41ef6511d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 00:00:19 +1300 Subject: [PATCH 399/482] connection plugins: use f-strings (#9322) * connection plugins: use f-strings * add changelog frag --- .../9322-fstr-connection-plugins.yml | 11 ++++++++ plugins/connection/chroot.py | 24 ++++++++-------- plugins/connection/funcd.py | 6 ++-- plugins/connection/incus.py | 24 ++++++---------- plugins/connection/iocage.py | 11 ++++---- plugins/connection/jail.py | 28 +++++++++---------- plugins/connection/lxc.py | 20 ++++++------- plugins/connection/lxd.py | 22 +++++++-------- plugins/connection/qubes.py | 22 +++++++-------- plugins/connection/saltstack.py | 10 +++---- plugins/connection/zone.py | 26 ++++++++--------- 
11 files changed, 105 insertions(+), 99 deletions(-) create mode 100644 changelogs/fragments/9322-fstr-connection-plugins.yml diff --git a/changelogs/fragments/9322-fstr-connection-plugins.yml b/changelogs/fragments/9322-fstr-connection-plugins.yml new file mode 100644 index 0000000000..4b3e264cfb --- /dev/null +++ b/changelogs/fragments/9322-fstr-connection-plugins.yml @@ -0,0 +1,11 @@ +minor_changes: + - chroot connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - funcd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - incus connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - iocage connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - jail connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - lxc connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - lxd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - qubes connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - saltstack connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). + - zone connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). 
diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 3567912359..2586109669 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -120,7 +120,7 @@ class Connection(ConnectionBase): # do some trivial checks for ensuring 'host' is actually a chroot'able dir if not os.path.isdir(self.chroot): - raise AnsibleError("%s is not a directory" % self.chroot) + raise AnsibleError(f"{self.chroot} is not a directory") chrootsh = os.path.join(self.chroot, 'bin/sh') # Want to check for a usable bourne shell inside the chroot. @@ -128,7 +128,7 @@ class Connection(ConnectionBase): # gets really complicated really fast. So we punt on finding that # out. As long as it's a symlink we assume that it will work if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))): - raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) + raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)") def _connect(self): """ connect to the chroot """ @@ -161,7 +161,7 @@ class Connection(ConnectionBase): executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % local_cmd, host=self.chroot) + display.vvv(f"EXEC {local_cmd}", host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -195,7 +195,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -205,27 +205,27 @@ class Connection(ConnectionBase): else: count = '' try: 
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") @@ -237,10 +237,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): 
""" terminate the connection; nothing to do here """ diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 7765f53110..6184d946e0 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -72,7 +72,7 @@ class Connection(ConnectionBase): raise AnsibleError("Internal Error: this module does not support optimized module pipelining") # totally ignores privilege escalation - display.vvv("EXEC %s" % cmd, host=self.host) + display.vvv(f"EXEC {cmd}", host=self.host) p = self.client.command.run(cmd)[self.host] return p[0], p[1], p[2] @@ -87,14 +87,14 @@ class Connection(ConnectionBase): """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) # need to use a tmp dir due to difference of semantic for getfile # ( who take a # directory as destination) and fetch_file, who # take a file directly diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index 8adea2d13a..097b3b800f 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -93,14 +93,14 @@ class Connection(ConnectionBase): """ execute a command on the Incus host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - self._display.vvv(u"EXEC {0}".format(cmd), + self._display.vvv(f"EXEC {cmd}", host=self._instance()) local_cmd = [ self._incus_cmd, "--project", self.get_option("project"), "exec", - "%s:%s" % (self.get_option("remote"), self._instance()), + f"{self.get_option('remote')}:{self._instance()}", "--", self._play_context.executable, 
"-c", cmd] @@ -114,12 +114,10 @@ class Connection(ConnectionBase): stderr = to_text(stderr) if stderr == "Error: Instance is not running.\n": - raise AnsibleConnectionFailure("instance not running: %s" % - self._instance()) + raise AnsibleConnectionFailure(f"instance not running: {self._instance()}") if stderr == "Error: Instance not found\n": - raise AnsibleConnectionFailure("instance not found: %s" % - self._instance()) + raise AnsibleConnectionFailure(f"instance not found: {self._instance()}") return process.returncode, stdout, stderr @@ -127,20 +125,18 @@ class Connection(ConnectionBase): """ put a file from local to Incus """ super(Connection, self).put_file(in_path, out_path) - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._instance()) if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [ self._incus_cmd, "--project", self.get_option("project"), "file", "push", "--quiet", in_path, - "%s:%s/%s" % (self.get_option("remote"), - self._instance(), - out_path)] + f"{self.get_option('remote')}:{self._instance()}/{out_path}"] local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -150,16 +146,14 @@ class Connection(ConnectionBase): """ fetch a file from Incus to local """ super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._instance()) local_cmd = [ self._incus_cmd, "--project", self.get_option("project"), "file", "pull", "--quiet", - "%s:%s/%s" % (self.get_option("remote"), - self._instance(), - in_path), + f"{self.get_option('remote')}:{self._instance()}/{in_path}", out_path] local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] diff --git 
a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 79d4f88594..411a81b0d9 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -55,11 +55,12 @@ class Connection(Jail): jail_uuid = self.get_jail_uuid() - kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid) + kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}' - display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format( - iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]), - host=kwargs[Jail.modified_jailname_key]) + display.vvv( + f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}", + host=kwargs[Jail.modified_jailname_key] + ) super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -81,6 +82,6 @@ class Connection(Jail): p.wait() if p.returncode != 0: - raise AnsibleError(u"iocage returned an error: {0}".format(stdout)) + raise AnsibleError(f"iocage returned an error: {stdout}") return stdout.strip('\n') diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 7d0abdde3a..1ebe5b26ef 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -75,14 +75,14 @@ class Connection(ConnectionBase): self.jexec_cmd = self._search_executable('jexec') if self.jail not in self.list_jails(): - raise AnsibleError("incorrect jail name %s" % self.jail) + raise AnsibleError(f"incorrect jail name {self.jail}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_jails(self): p = subprocess.Popen([self.jls_cmd, '-q', 'name'], @@ -97,7 +97,7 @@ class Connection(ConnectionBase): """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: - display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: 
{0}".format(self._play_context.remote_user), host=self.jail) + display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: {self._play_context.remote_user}", host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): @@ -115,11 +115,11 @@ class Connection(ConnectionBase): if self._play_context.remote_user is not None: local_cmd += ['-U', self._play_context.remote_user] # update HOME since -U does not update the jail environment - set_env = 'HOME=~' + self._play_context.remote_user + ' ' + set_env = f"HOME=~{self._play_context.remote_user} " local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd] - display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + display.vvv(f"EXEC {local_cmd}", host=self.jail) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -153,7 +153,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -163,27 +163,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % 
(in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("jail connection requires dd command in the jail") @@ -195,10 +195,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 2710e6984e..5a6d3575a4 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -82,7 +82,7 @@ class Connection(ConnectionBase): self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name) self.container = _lxc.Container(self.container_name) if self.container.state == "STOPPED": - raise errors.AnsibleError("%s is not running" % self.container_name) + raise 
errors.AnsibleError(f"{self.container_name} is not running") @staticmethod def _communicate(pid, in_data, stdin, stdout, stderr): @@ -144,10 +144,10 @@ class Connection(ConnectionBase): read_stdin, write_stdin = os.pipe() kwargs['stdin'] = self._set_nonblocking(read_stdin) - self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name) + self._display.vvv(f"EXEC {local_cmd}", host=self.container_name) pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) if pid == -1: - msg = "failed to attach to container %s" % self.container_name + msg = f"failed to attach to container {self.container_name}" raise errors.AnsibleError(msg) write_stdout = os.close(write_stdout) @@ -174,18 +174,18 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') if not os.path.exists(in_path): - msg = "file or module does not exist: %s" % in_path + msg = f"file or module does not exist: {in_path}" raise errors.AnsibleFileNotFound(msg) try: src_file = open(in_path, "rb") except IOError: traceback.print_exc() - raise errors.AnsibleError("failed to open input file to %s" % in_path) + raise errors.AnsibleError(f"failed to open input file to {in_path}") try: def write_file(args): with open(out_path, 'wb+') as dst_file: @@ -194,7 +194,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file to %s" % out_path + msg = f"failed to transfer file to {out_path}" raise errors.AnsibleError(msg) finally: src_file.close() @@ -202,7 +202,7 @@ class Connection(ConnectionBase): def fetch_file(self, 
in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') @@ -210,7 +210,7 @@ class Connection(ConnectionBase): dst_file = open(out_path, "wb") except IOError: traceback.print_exc() - msg = "failed to open output file %s" % out_path + msg = f"failed to open output file {out_path}" raise errors.AnsibleError(msg) try: def write_file(args): @@ -225,7 +225,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file from %s to %s" % (in_path, out_path) + msg = f"failed to transfer file from {in_path} to {out_path}" raise errors.AnsibleError(msg) finally: dst_file.close() diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index d850907182..5fa40c3636 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -93,19 +93,19 @@ class Connection(ConnectionBase): """ execute a command on the lxd host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - self._display.vvv(u"EXEC {0}".format(cmd), host=self._host()) + self._display.vvv(f"EXEC {cmd}", host=self._host()) local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) local_cmd.extend([ "exec", - "%s:%s" % (self.get_option("remote"), self._host()), + f"{self.get_option('remote')}:{self._host()}", "--", self.get_option("executable"), "-c", cmd ]) - self._display.vvvvv(u"EXEC {0}".format(local_cmd), host=self._host()) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] in_data = 
to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') @@ -116,13 +116,13 @@ class Connection(ConnectionBase): stdout = to_text(stdout) stderr = to_text(stderr) - self._display.vvvvv(u"EXEC lxc output: {0} {1}".format(stdout, stderr), host=self._host()) + self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host()) if "is not running" in stderr: - raise AnsibleConnectionFailure("instance not running: %s" % self._host()) + raise AnsibleConnectionFailure(f"instance not running: {self._host()}") if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found": - raise AnsibleConnectionFailure("instance not found: %s" % self._host()) + raise AnsibleConnectionFailure(f"instance not found: {self._host()}") return process.returncode, stdout, stderr @@ -130,10 +130,10 @@ class Connection(ConnectionBase): """ put a file from local to lxd """ super(Connection, self).put_file(in_path, out_path) - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host()) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host()) if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [self._lxc_cmd] if self.get_option("project"): @@ -141,7 +141,7 @@ class Connection(ConnectionBase): local_cmd.extend([ "file", "push", in_path, - "%s:%s/%s" % (self.get_option("remote"), self._host(), out_path) + f"{self.get_option('remote')}:{self._host()}/{out_path}" ]) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -153,14 +153,14 @@ class Connection(ConnectionBase): """ fetch a file from lxd to local """ super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host()) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host()) local_cmd = 
[self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) local_cmd.extend([ "file", "pull", - "%s:%s/%s" % (self.get_option("remote"), self._host(), in_path), + f"{self.get_option('remote')}:{self._host()}/{in_path}", out_path ]) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 8860fbb777..0b8e61f574 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -78,7 +78,7 @@ class Connection(ConnectionBase): """ display.vvvv("CMD: ", cmd) if not cmd.endswith("\n"): - cmd = cmd + "\n" + cmd = f"{cmd}\n" local_cmd = [] # For dom0 @@ -95,7 +95,7 @@ class Connection(ConnectionBase): display.vvvv("Local cmd: ", local_cmd) - display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname) + display.vvv(f"RUN {local_cmd}", host=self._remote_vmname) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -114,42 +114,42 @@ class Connection(ConnectionBase): """Run specified command in a running QubesVM """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.vvvv("CMD IS: %s" % cmd) + display.vvvv(f"CMD IS: {cmd}") rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr)) + display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}") return rc, stdout, stderr def put_file(self, in_path, out_path): """ Place a local file located in 'in_path' inside VM at 'out_path' """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname) with open(in_path, "rb") as fobj: source_data = fobj.read() - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell") + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data, "qubes.VMRootShell") # if qubes.VMRootShell service 
not supported, fallback to qubes.VMShell and # hope it will have appropriate permissions if retcode == 127: - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data) + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data) if retcode != 0: - raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}') def fetch_file(self, in_path, out_path): """Obtain file specified via 'in_path' from the container and place it at 'out_path' """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname) # We are running in dom0 - cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)] + cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, f"cat {in_path}"] with open(out_path, "wb") as fobj: p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) p.communicate() if p.returncode != 0: - raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}') def close(self): """ Closing the connection """ diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 1dbc7296c7..da993dfec4 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -59,11 +59,11 @@ class Connection(ConnectionBase): if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % cmd, host=self.host) + self._display.vvv(f"EXEC {cmd}", host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 - res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) + res = self.client.cmd(self.host, 'cmd.exec_code_all', 
['bash', f"true;{cmd}"]) if self.host not in res: - raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) + raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct") p = res[self.host] return p['retcode'], p['stdout'], p['stderr'] @@ -81,7 +81,7 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) out_path = self._normalize_path(out_path, '/') - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) @@ -93,7 +93,7 @@ class Connection(ConnectionBase): super(Connection, self).fetch_file(in_path, out_path) in_path = self._normalize_path(in_path, '/') - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] open(out_path, 'wb').write(content) diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 0a591143e0..77d85ad0ee 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -62,14 +62,14 @@ class Connection(ConnectionBase): self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): - raise AnsibleError("incorrect zone name %s" % self.zone) + raise AnsibleError(f"incorrect zone name {self.zone}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_zones(self): process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], @@ -94,7 +94,7 @@ 
class Connection(ConnectionBase): # stdout, stderr = p.communicate() path = process.stdout.readlines()[0].split(':')[3] - return path + '/root' + return f"{path}/root" def _connect(self): """ connect to the zone; nothing to do here """ @@ -117,7 +117,7 @@ class Connection(ConnectionBase): local_cmd = [self.zlogin_cmd, self.zone, cmd] local_cmd = map(to_bytes, local_cmd) - display.vvv("EXEC %s" % (local_cmd), host=self.zone) + display.vvv(f"EXEC {local_cmd}", host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -149,7 +149,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -159,27 +159,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from zone to 
local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("zone connection requires dd command in the zone") @@ -191,10 +191,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ From b429e8a2cf643fcf8aac3e154babb961c4573f18 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 00:00:24 +1300 Subject: [PATCH 400/482] xfconf/xfconf_info: add return value version (#9226) * xfconf/xfconf_info: add return value version * add changelog frag * adapt test to helper improvements * rollback copyright update * replace tab with spaces in test yamls --- changelogs/fragments/9226-xfconf-version.yml | 3 ++ plugins/module_utils/xfconf.py | 25 +++++++---- plugins/modules/xfconf.py | 19 +++++--- plugins/modules/xfconf_info.py | 10 ++++- tests/unit/plugins/modules/test_xfconf.yaml | 44 ++++++++++++++++++- .../plugins/modules/test_xfconf_info.yaml | 41 +++++++++++++++-- 6 files changed, 121 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/9226-xfconf-version.yml diff --git 
a/changelogs/fragments/9226-xfconf-version.yml b/changelogs/fragments/9226-xfconf-version.yml new file mode 100644 index 0000000000..517beb9b96 --- /dev/null +++ b/changelogs/fragments/9226-xfconf-version.yml @@ -0,0 +1,3 @@ +minor_changes: + - xfconf - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). + - xfconf_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py index b63518d0c4..344bd1f3c9 100644 --- a/plugins/module_utils/xfconf.py +++ b/plugins/module_utils/xfconf.py @@ -7,10 +7,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.parsing.convert_bool import boolean -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt -@fmt.unpack_args +@cmd_runner_fmt.unpack_args def _values_fmt(values, value_types): result = [] for value, value_type in zip(values, value_types): @@ -25,14 +25,21 @@ def xfconf_runner(module, **kwargs): module, command='xfconf-query', arg_formats=dict( - channel=fmt.as_opt_val("--channel"), - property=fmt.as_opt_val("--property"), - force_array=fmt.as_bool("--force-array"), - reset=fmt.as_bool("--reset"), - create=fmt.as_bool("--create"), - list_arg=fmt.as_bool("--list"), - values_and_types=fmt.as_func(_values_fmt), + channel=cmd_runner_fmt.as_opt_val("--channel"), + property=cmd_runner_fmt.as_opt_val("--property"), + force_array=cmd_runner_fmt.as_bool("--force-array"), + reset=cmd_runner_fmt.as_bool("--reset"), + create=cmd_runner_fmt.as_bool("--create"), + list_arg=cmd_runner_fmt.as_bool("--list"), + values_and_types=_values_fmt, + version=cmd_runner_fmt.as_fixed("--version"), ), **kwargs ) return runner + + +def 
get_xfconf_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return out.splitlines()[0].split()[1] diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index b925e624c8..c13f7b7f45 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -153,10 +153,17 @@ cmd: - string - --set - Pacific/Auckland +version: + description: + - The version of the C(xfconf-query) command. + returned: success + type: str + sample: 4.18.1 + version_added: 10.2.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner +from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner, get_xfconf_version class XFConfProperty(StateModuleHelper): @@ -183,8 +190,8 @@ class XFConfProperty(StateModuleHelper): def __init_module__(self): self.runner = xfconf_runner(self.module) - self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property, - self.vars.channel) + self.vars.version = get_xfconf_version(self.runner) + self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property, self.vars.channel) self.vars.set('previous_value', self._get()) self.vars.set('type', self.vars.value_type) self.vars.set_meta('value', initial_value=self.vars.previous_value) @@ -213,8 +220,7 @@ class XFConfProperty(StateModuleHelper): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set("run_info", ctx.run_info, verbosity=4) self.vars.value = None def state_present(self): @@ -244,8 +250,7 @@ class XFConfProperty(StateModuleHelper): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set("run_info", 
ctx.run_info, verbosity=4) if not self.vars.is_array: self.vars.value = self.vars.value[0] diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index d8e6acc50d..36de7daecc 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -118,10 +118,17 @@ value_array: - Main - Work - Tmp +version: + description: + - The version of the C(xfconf-query) command. + returned: success + type: str + sample: 4.18.1 + version_added: 10.2.0 """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner +from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner, get_xfconf_version class XFConfInfo(ModuleHelper): @@ -139,6 +146,7 @@ class XFConfInfo(ModuleHelper): def __init_module__(self): self.runner = xfconf_runner(self.module, check_rc=True) + self.vars.version = get_xfconf_version(self.runner) self.vars.set("list_arg", False, output=False) self.vars.set("is_array", False) diff --git a/tests/unit/plugins/modules/test_xfconf.yaml b/tests/unit/plugins/modules/test_xfconf.yaml index 481b090e94..c52c8f7c1b 100644 --- a/tests/unit/plugins/modules/test_xfconf.yaml +++ b/tests/unit/plugins/modules/test_xfconf.yaml @@ -21,11 +21,23 @@ previous_value: '100' type: int value: '90' + version: "4.18.1" mocks: run_command: - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] + - command: [/testbin/xfconf-query, --version] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 + out: &version-output | + xfconf-query 4.18.1 + + Copyright (c) 2008-2023 + The Xfce development team. All rights reserved. + + Please report bugs to . 
+ err: "" + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] + environ: *env-def + rc: 0 out: "100\n" err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity, --create, --type, int, --set, '90'] @@ -45,8 +57,14 @@ previous_value: '90' type: int value: '90' + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: *env-def rc: 0 @@ -69,8 +87,14 @@ previous_value: 'true' type: bool value: 'False' + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit] environ: *env-def rc: 0 @@ -93,8 +117,14 @@ previous_value: [Main, Work, Tmp] type: [string, string, string] value: [A, B, C] + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 @@ -136,8 +166,14 @@ previous_value: [A, B, C] type: [string, string, string] value: [A, B, C] + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 @@ -177,8 +213,14 @@ previous_value: [A, B, C] type: value: + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 diff 
--git a/tests/unit/plugins/modules/test_xfconf_info.yaml b/tests/unit/plugins/modules/test_xfconf_info.yaml index 535e50602f..8e7ae667c4 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.yaml +++ b/tests/unit/plugins/modules/test_xfconf_info.yaml @@ -6,6 +6,13 @@ --- anchors: environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + out: &version-output | + xfconf-query 4.18.1 + + Copyright (c) 2008-2023 + The Xfce development team. All rights reserved. + + Please report bugs to . test_cases: - id: test_simple_property_get input: @@ -14,8 +21,14 @@ test_cases: output: value: '100' is_array: false + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: *env-def rc: 0 @@ -25,9 +38,15 @@ test_cases: input: channel: xfwm4 property: /general/i_dont_exist - output: {} + output: + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/i_dont_exist] environ: *env-def rc: 1 @@ -39,8 +58,6 @@ test_cases: output: failed: true msg: "missing parameter(s) required by 'property': channel" - mocks: - run_command: [] - id: test_property_get_array input: channel: xfwm4 @@ -48,8 +65,14 @@ test_cases: output: is_array: true value_array: [Main, Work, Tmp] + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 @@ -59,8 +82,14 @@ test_cases: input: {} output: channels: [a, b, c] + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: 
*version-output + err: "" - command: [/testbin/xfconf-query, --list] environ: *env-def rc: 0 @@ -77,8 +106,14 @@ test_cases: - /general/wrap_windows - /general/wrap_workspaces - /general/zoom_desktop + version: "4.18.1" mocks: run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: "" - command: [/testbin/xfconf-query, --list, --channel, xfwm4] environ: *env-def rc: 0 From 2005125af4bfc7b45a729151f7a5e202d20369be Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 00:00:30 +1300 Subject: [PATCH 401/482] u[a-s]*: normalize docs (#9338) * u[a-s]*: normalize docs * Update plugins/modules/udm_dns_record.py Co-authored-by: Felix Fontein * Update plugins/modules/udm_dns_record.py Co-authored-by: Felix Fontein * Update plugins/modules/udm_dns_record.py Co-authored-by: Felix Fontein * Update plugins/modules/udm_dns_zone.py Co-authored-by: Felix Fontein * Update plugins/modules/ufw.py Co-authored-by: Felix Fontein * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/udm_dns_record.py | 99 +++-- plugins/modules/udm_dns_zone.py | 152 ++++--- plugins/modules/udm_group.py | 96 +++-- plugins/modules/udm_share.py | 630 +++++++++++++++--------------- plugins/modules/udm_user.py | 550 +++++++++++++------------- plugins/modules/ufw.py | 112 +++--- plugins/modules/uptimerobot.py | 57 ++- plugins/modules/urpmi.py | 25 +- plugins/modules/usb_facts.py | 13 +- 9 files changed, 846 insertions(+), 888 deletions(-) diff --git a/plugins/modules/udm_dns_record.py b/plugins/modules/udm_dns_record.py index 857792993d..a87ce5fede 100644 --- a/plugins/modules/udm_dns_record.py +++ b/plugins/modules/udm_dns_record.py @@ -10,63 +10,60 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_dns_record author: - - 
Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage dns entries on a univention corporate server description: - - "This module allows to manage dns records on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage dns records on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. requirements: - - Univention - - ipaddress (for O(type=ptr_record)) + - Univention + - ipaddress (for O(type=ptr_record)) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns record is present or not. - name: - type: str - required: true - description: - - "Name of the record, this is also the DNS record. E.g. www for - www.example.com." - - For PTR records this has to be the IP address. - zone: - type: str - required: true - description: - - Corresponding DNS zone for this record, e.g. example.com. - - For PTR records this has to be the full reverse zone (for example V(1.1.192.in-addr.arpa)). - type: - type: str - required: true - description: - - "Define the record type. V(host_record) is a A or AAAA record, - V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record) - is a SRV record and V(txt_record) is a TXT record." - - "The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record)." - data: - type: dict - default: {} - description: - - "Additional data for this record, for example V({'a': '192.0.2.1'})." - - Required if O(state=present). -''' + state: + type: str + default: "present" + choices: [present, absent] + description: + - Whether the dns record is present or not. 
+ name: + type: str + required: true + description: + - Name of the record, this is also the DNS record. For example V(www) for www.example.com. + - For PTR records this has to be the IP address. + zone: + type: str + required: true + description: + - Corresponding DNS zone for this record, for example V(example.com). + - For PTR records this has to be the full reverse zone (for example V(1.1.192.in-addr.arpa)). + type: + type: str + required: true + description: + - Define the record type. V(host_record) is a A or AAAA record, V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record) is a SRV + record and V(txt_record) is a TXT record. + - 'The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record).' + data: + type: dict + default: {} + description: + - "Additional data for this record, for example V({'a': '192.0.2.1'})." + - Required if O(state=present). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a DNS record on a UCS community.general.udm_dns_record: name: www @@ -74,8 +71,8 @@ EXAMPLES = ''' type: host_record data: a: - - 192.0.2.1 - - 2001:0db8::42 + - 192.0.2.1 + - 2001:0db8::42 - name: Create a DNS v4 PTR record on a UCS community.general.udm_dns_record: @@ -92,10 +89,10 @@ EXAMPLES = ''' type: ptr_record data: ptr_record: "www.example.com." 
-''' +""" -RETURN = '''#''' +RETURN = """#""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/udm_dns_zone.py b/plugins/modules/udm_dns_zone.py index 387d5cc45b..765f996aba 100644 --- a/plugins/modules/udm_dns_zone.py +++ b/plugins/modules/udm_dns_zone.py @@ -10,91 +10,87 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_dns_zone author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage dns zones on a univention corporate server description: - - "This module allows to manage dns zones on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage dns zones on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object or + edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns zone is present or not. - type: - type: str - required: true - description: - - Define if the zone is a forward or reverse DNS zone. - - "The available choices are: V(forward_zone), V(reverse_zone)." - zone: - type: str - required: true - description: - - DNS zone name, for example V(example.com). - aliases: [name] - nameserver: - type: list - elements: str - default: [] - description: - - List of appropriate name servers. Required if O(state=present). - interfaces: - type: list - elements: str - default: [] - description: - - List of interface IP addresses, on which the server should - response this zone. Required if O(state=present). 
- - refresh: - type: int - default: 3600 - description: - - Interval before the zone should be refreshed. - retry: - type: int - default: 1800 - description: - - Interval that should elapse before a failed refresh should be retried. - expire: - type: int - default: 604800 - description: - - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. - ttl: - type: int - default: 600 - description: - - Minimum TTL field that should be exported with any RR from this zone. - - contact: - type: str - default: '' - description: - - Contact person in the SOA record. - mx: - type: list - elements: str - default: [] - description: - - List of MX servers. (Must declared as A or AAAA records). -''' + state: + type: str + default: "present" + choices: [present, absent] + description: + - Whether the DNS zone is present or not. + type: + type: str + required: true + description: + - Define if the zone is a forward or reverse DNS zone. + - 'The available choices are: V(forward_zone), V(reverse_zone).' + zone: + type: str + required: true + description: + - DNS zone name, for example V(example.com). + aliases: [name] + nameserver: + type: list + elements: str + default: [] + description: + - List of appropriate name servers. Required if O(state=present). + interfaces: + type: list + elements: str + default: [] + description: + - List of interface IP addresses, on which the server should response this zone. Required if O(state=present). + refresh: + type: int + default: 3600 + description: + - Interval before the zone should be refreshed. + retry: + type: int + default: 1800 + description: + - Interval that should elapse before a failed refresh should be retried. + expire: + type: int + default: 604800 + description: + - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. 
+ ttl: + type: int + default: 600 + description: + - Minimum TTL field that should be exported with any RR from this zone. + contact: + type: str + default: '' + description: + - Contact person in the SOA record. + mx: + type: list + elements: str + default: [] + description: + - List of MX servers. (Must declared as A or AAAA records). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a DNS zone on a UCS community.general.udm_dns_zone: zone: example.com @@ -103,10 +99,10 @@ EXAMPLES = ''' - ucs.example.com interfaces: - 192.0.2.1 -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_group.py b/plugins/modules/udm_group.py index 5fe2422f8b..238b0182ed 100644 --- a/plugins/modules/udm_group.py +++ b/plugins/modules/udm_group.py @@ -10,63 +10,61 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_group author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage of the posix group description: - - "This module allows to manage user groups on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage user groups on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the group is present or not. - type: str - name: - required: true - description: - - Name of the posix group. 
- type: str + state: + required: false + default: "present" + choices: [present, absent] description: - required: false - description: - - Group description. - type: str - position: - required: false - description: - - define the whole ldap position of the group, e.g. - C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). - type: str - default: '' - ou: - required: false - description: - - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com). - type: str - default: '' - subpath: - required: false - description: - - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups). - type: str - default: "cn=groups" -''' + - Whether the group is present or not. + type: str + name: + required: true + description: + - Name of the POSIX group. + type: str + description: + required: false + description: + - Group description. + type: str + position: + required: false + description: + - Define the whole LDAP position of the group, for example V(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). + type: str + default: '' + ou: + required: false + description: + - LDAP OU, for example V(school) for LDAP OU V(ou=school,dc=example,dc=com). + type: str + default: '' + subpath: + required: false + description: + - Subpath inside the OU, for example V(cn=classes,cn=students,cn=groups). 
+ type: str + default: "cn=groups" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a POSIX group community.general.udm_group: name: g123m-1A @@ -84,10 +82,10 @@ EXAMPLES = ''' community.general.udm_group: name: g123m-1A position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_share.py b/plugins/modules/udm_share.py index 8ae243b3de..3489607b09 100644 --- a/plugins/modules/udm_share.py +++ b/plugins/modules/udm_share.py @@ -10,339 +10,337 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_share author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage samba shares on a univention corporate server description: - - "This module allows to manage samba shares on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage samba shares on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the share is present or not. - type: str - name: - required: true - description: - - Name - type: str - host: - required: false - description: - - Host FQDN (server which provides the share), for example V({{ ansible_fqdn }}). Required if O(state=present). 
- type: str - path: - required: false - description: - - Directory on the providing server, for example V(/home). Required if O(state=present). - type: path - sambaName: - required: false - description: - - Windows name. Required if O(state=present). - type: str - aliases: [ samba_name ] - ou: - required: true - description: - - Organisational unit, inside the LDAP Base DN. - type: str - owner: - default: '0' - description: - - Directory owner of the share's root directory. - type: str - group: - default: '0' - description: - - Directory owner group of the share's root directory. - type: str - directorymode: - default: '00755' - description: - - Permissions for the share's root directory. - type: str - root_squash: - default: true - description: - - Modify user ID for root user (root squashing). - type: bool - subtree_checking: - default: true - description: - - Subtree checking. - type: bool - sync: - default: 'sync' - description: - - NFS synchronisation. - type: str - writeable: - default: true - description: - - NFS write access. - type: bool - sambaBlockSize: - description: - - Blocking size. - type: str - aliases: [ samba_block_size ] - sambaBlockingLocks: - default: true - description: - - Blocking locks. - type: bool - aliases: [ samba_blocking_locks ] - sambaBrowseable: - description: - - Show in Windows network environment. - type: bool - default: true - aliases: [ samba_browsable ] - sambaCreateMode: - default: '0744' - description: - - File mode. - type: str - aliases: [ samba_create_mode ] - sambaCscPolicy: - default: 'manual' - description: - - Client-side caching policy. - type: str - aliases: [ samba_csc_policy ] - sambaCustomSettings: - default: [] - description: - - Option name in smb.conf and its value. - type: list - elements: dict - aliases: [ samba_custom_settings ] - sambaDirectoryMode: - default: '0755' - description: - - Directory mode. 
- type: str - aliases: [ samba_directory_mode ] - sambaDirectorySecurityMode: - default: '0777' - description: - - Directory security mode. - type: str - aliases: [ samba_directory_security_mode ] - sambaDosFilemode: - default: false - description: - - Users with write access may modify permissions. - type: bool - aliases: [ samba_dos_filemode ] - sambaFakeOplocks: - default: false - description: - - Fake oplocks. - type: bool - aliases: [ samba_fake_oplocks ] - sambaForceCreateMode: - default: false - description: - - Force file mode. - type: bool - aliases: [ samba_force_create_mode ] - sambaForceDirectoryMode: - default: false - description: - - Force directory mode. - type: bool - aliases: [ samba_force_directory_mode ] - sambaForceDirectorySecurityMode: - default: false - description: - - Force directory security mode. - type: bool - aliases: [ samba_force_directory_security_mode ] - sambaForceGroup: - description: - - Force group. - type: str - aliases: [ samba_force_group ] - sambaForceSecurityMode: - default: false - description: - - Force security mode. - type: bool - aliases: [ samba_force_security_mode ] - sambaForceUser: - description: - - Force user. - type: str - aliases: [ samba_force_user ] - sambaHideFiles: - description: - - Hide files. - type: str - aliases: [ samba_hide_files ] - sambaHideUnreadable: - default: false - description: - - Hide unreadable files/directories. - type: bool - aliases: [ samba_hide_unreadable ] - sambaHostsAllow: - default: [] - description: - - Allowed host/network. - type: list - elements: str - aliases: [ samba_hosts_allow ] - sambaHostsDeny: - default: [] - description: - - Denied host/network. - type: list - elements: str - aliases: [ samba_hosts_deny ] - sambaInheritAcls: - default: true - description: - - Inherit ACLs. - type: bool - aliases: [ samba_inherit_acls ] - sambaInheritOwner: - default: false - description: - - Create files/directories with the owner of the parent directory. 
- type: bool - aliases: [ samba_inherit_owner ] - sambaInheritPermissions: - default: false - description: - - Create files/directories with permissions of the parent directory. - type: bool - aliases: [ samba_inherit_permissions ] - sambaInvalidUsers: - description: - - Invalid users or groups. - type: str - aliases: [ samba_invalid_users ] - sambaLevel2Oplocks: - default: true - description: - - Level 2 oplocks. - type: bool - aliases: [ samba_level_2_oplocks ] - sambaLocking: - default: true - description: - - Locking. - type: bool - aliases: [ samba_locking ] - sambaMSDFSRoot: - default: false - description: - - MSDFS root. - type: bool - aliases: [ samba_msdfs_root ] - sambaNtAclSupport: - default: true - description: - - NT ACL support. - type: bool - aliases: [ samba_nt_acl_support ] - sambaOplocks: - default: true - description: - - Oplocks. - type: bool - aliases: [ samba_oplocks ] - sambaPostexec: - description: - - Postexec script. - type: str - aliases: [ samba_postexec ] - sambaPreexec: - description: - - Preexec script. - type: str - aliases: [ samba_preexec ] - sambaPublic: - default: false - description: - - Allow anonymous read-only access with a guest user. - type: bool - aliases: [ samba_public ] - sambaSecurityMode: - default: '0777' - description: - - Security mode. - type: str - aliases: [ samba_security_mode ] - sambaStrictLocking: - default: 'Auto' - description: - - Strict locking. - type: str - aliases: [ samba_strict_locking ] - sambaVFSObjects: - description: - - VFS objects. - type: str - aliases: [ samba_vfs_objects ] - sambaValidUsers: - description: - - Valid users or groups. - type: str - aliases: [ samba_valid_users ] - sambaWriteList: - description: - - Restrict write access to these users/groups. - type: str - aliases: [ samba_write_list ] - sambaWriteable: - default: true - description: - - Samba write access. 
- type: bool - aliases: [ samba_writeable ] - nfs_hosts: - default: [] - description: - - Only allow access for this host, IP address or network. - type: list - elements: str - nfsCustomSettings: - default: [] - description: - - Option name in exports file. - type: list - elements: str - aliases: [ nfs_custom_settings ] -''' + state: + default: "present" + choices: [present, absent] + description: + - Whether the share is present or not. + type: str + name: + required: true + description: + - Name. + type: str + host: + required: false + description: + - Host FQDN (server which provides the share), for example V({{ ansible_fqdn }}). Required if O(state=present). + type: str + path: + required: false + description: + - Directory on the providing server, for example V(/home). Required if O(state=present). + type: path + sambaName: + required: false + description: + - Windows name. Required if O(state=present). + type: str + aliases: [samba_name] + ou: + required: true + description: + - Organisational unit, inside the LDAP Base DN. + type: str + owner: + default: '0' + description: + - Directory owner of the share's root directory. + type: str + group: + default: '0' + description: + - Directory owner group of the share's root directory. + type: str + directorymode: + default: '00755' + description: + - Permissions for the share's root directory. + type: str + root_squash: + default: true + description: + - Modify user ID for root user (root squashing). + type: bool + subtree_checking: + default: true + description: + - Subtree checking. + type: bool + sync: + default: 'sync' + description: + - NFS synchronisation. + type: str + writeable: + default: true + description: + - NFS write access. + type: bool + sambaBlockSize: + description: + - Blocking size. + type: str + aliases: [samba_block_size] + sambaBlockingLocks: + default: true + description: + - Blocking locks. 
+ type: bool + aliases: [samba_blocking_locks] + sambaBrowseable: + description: + - Show in Windows network environment. + type: bool + default: true + aliases: [samba_browsable] + sambaCreateMode: + default: '0744' + description: + - File mode. + type: str + aliases: [samba_create_mode] + sambaCscPolicy: + default: 'manual' + description: + - Client-side caching policy. + type: str + aliases: [samba_csc_policy] + sambaCustomSettings: + default: [] + description: + - Option name in smb.conf and its value. + type: list + elements: dict + aliases: [samba_custom_settings] + sambaDirectoryMode: + default: '0755' + description: + - Directory mode. + type: str + aliases: [samba_directory_mode] + sambaDirectorySecurityMode: + default: '0777' + description: + - Directory security mode. + type: str + aliases: [samba_directory_security_mode] + sambaDosFilemode: + default: false + description: + - Users with write access may modify permissions. + type: bool + aliases: [samba_dos_filemode] + sambaFakeOplocks: + default: false + description: + - Fake oplocks. + type: bool + aliases: [samba_fake_oplocks] + sambaForceCreateMode: + default: false + description: + - Force file mode. + type: bool + aliases: [samba_force_create_mode] + sambaForceDirectoryMode: + default: false + description: + - Force directory mode. + type: bool + aliases: [samba_force_directory_mode] + sambaForceDirectorySecurityMode: + default: false + description: + - Force directory security mode. + type: bool + aliases: [samba_force_directory_security_mode] + sambaForceGroup: + description: + - Force group. + type: str + aliases: [samba_force_group] + sambaForceSecurityMode: + default: false + description: + - Force security mode. + type: bool + aliases: [samba_force_security_mode] + sambaForceUser: + description: + - Force user. + type: str + aliases: [samba_force_user] + sambaHideFiles: + description: + - Hide files. 
+ type: str + aliases: [samba_hide_files] + sambaHideUnreadable: + default: false + description: + - Hide unreadable files/directories. + type: bool + aliases: [samba_hide_unreadable] + sambaHostsAllow: + default: [] + description: + - Allowed host/network. + type: list + elements: str + aliases: [samba_hosts_allow] + sambaHostsDeny: + default: [] + description: + - Denied host/network. + type: list + elements: str + aliases: [samba_hosts_deny] + sambaInheritAcls: + default: true + description: + - Inherit ACLs. + type: bool + aliases: [samba_inherit_acls] + sambaInheritOwner: + default: false + description: + - Create files/directories with the owner of the parent directory. + type: bool + aliases: [samba_inherit_owner] + sambaInheritPermissions: + default: false + description: + - Create files/directories with permissions of the parent directory. + type: bool + aliases: [samba_inherit_permissions] + sambaInvalidUsers: + description: + - Invalid users or groups. + type: str + aliases: [samba_invalid_users] + sambaLevel2Oplocks: + default: true + description: + - Level 2 oplocks. + type: bool + aliases: [samba_level_2_oplocks] + sambaLocking: + default: true + description: + - Locking. + type: bool + aliases: [samba_locking] + sambaMSDFSRoot: + default: false + description: + - MSDFS root. + type: bool + aliases: [samba_msdfs_root] + sambaNtAclSupport: + default: true + description: + - NT ACL support. + type: bool + aliases: [samba_nt_acl_support] + sambaOplocks: + default: true + description: + - Oplocks. + type: bool + aliases: [samba_oplocks] + sambaPostexec: + description: + - Postexec script. + type: str + aliases: [samba_postexec] + sambaPreexec: + description: + - Preexec script. + type: str + aliases: [samba_preexec] + sambaPublic: + default: false + description: + - Allow anonymous read-only access with a guest user. + type: bool + aliases: [samba_public] + sambaSecurityMode: + default: '0777' + description: + - Security mode. 
+ type: str + aliases: [samba_security_mode] + sambaStrictLocking: + default: 'Auto' + description: + - Strict locking. + type: str + aliases: [samba_strict_locking] + sambaVFSObjects: + description: + - VFS objects. + type: str + aliases: [samba_vfs_objects] + sambaValidUsers: + description: + - Valid users or groups. + type: str + aliases: [samba_valid_users] + sambaWriteList: + description: + - Restrict write access to these users/groups. + type: str + aliases: [samba_write_list] + sambaWriteable: + default: true + description: + - Samba write access. + type: bool + aliases: [samba_writeable] + nfs_hosts: + default: [] + description: + - Only allow access for this host, IP address or network. + type: list + elements: str + nfsCustomSettings: + default: [] + description: + - Option name in exports file. + type: list + elements: str + aliases: [nfs_custom_settings] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a share named home on the server ucs.example.com with the path /home community.general.udm_share: name: home path: /home host: ucs.example.com sambaName: Home -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index 5257a22028..bb431ca75f 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -10,297 +10,285 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: udm_user author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage posix users on a univention corporate server description: - - "This module allows to manage posix users on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." 
+ - This module allows to manage posix users on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. notes: - - This module requires the deprecated L(crypt Python module, - https://docs.python.org/3.12/library/crypt.html) library which was removed from Python 3.13. - For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). + - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which was removed from + Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). requirements: - - legacycrypt (on Python 3.13 or newer) + - legacycrypt (on Python 3.13 or newer) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the user is present or not. - type: str - username: - required: true - description: - - User name - aliases: ['name'] - type: str - firstname: - description: - - First name. Required if O(state=present). - type: str - lastname: - description: - - Last name. Required if O(state=present). - type: str - password: - description: - - Password. Required if O(state=present). - type: str - birthday: - description: - - Birthday - type: str - city: - description: - - City of users business address. - type: str - country: - description: - - Country of users business address. - type: str - department_number: - description: - - Department number of users business address. 
- aliases: [ departmentNumber ] - type: str + state: + default: "present" + choices: [present, absent] description: - description: - - Description (not gecos) - type: str - display_name: - description: - - Display name (not gecos) - aliases: [ displayName ] - type: str - email: - default: [''] - description: - - A list of e-mail addresses. - type: list - elements: str - employee_number: - description: - - Employee number - aliases: [ employeeNumber ] - type: str - employee_type: - description: - - Employee type - aliases: [ employeeType ] - type: str - gecos: - description: - - GECOS - type: str - groups: - default: [] - description: - - "POSIX groups, the LDAP DNs of the groups will be found with the - LDAP filter for each group as $GROUP: - V((&(objectClass=posixGroup\\)(cn=$GROUP\\)\\))." - type: list - elements: str - home_share: - description: - - "Home NFS share. Must be a LDAP DN, e.g. - V(cn=home,cn=shares,ou=school,dc=example,dc=com)." - aliases: [ homeShare ] - type: str - home_share_path: - description: - - Path to home NFS share, inside the homeShare. - aliases: [ homeSharePath ] - type: str - home_telephone_number: - default: [] - description: - - List of private telephone numbers. - aliases: [ homeTelephoneNumber ] - type: list - elements: str - homedrive: - description: - - Windows home drive, for example V("H:"). - type: str - mail_alternative_address: - default: [] - description: - - List of alternative e-mail addresses. 
- aliases: [ mailAlternativeAddress ] - type: list - elements: str - mail_home_server: - description: - - FQDN of mail server - aliases: [ mailHomeServer ] - type: str - mail_primary_address: - description: - - Primary e-mail address - aliases: [ mailPrimaryAddress ] - type: str - mobile_telephone_number: - default: [] - description: - - Mobile phone number - aliases: [ mobileTelephoneNumber ] - type: list - elements: str - organisation: - description: - - Organisation - aliases: [ organization ] - type: str - overridePWHistory: - type: bool - default: false - description: - - Override password history - aliases: [ override_pw_history ] - overridePWLength: - type: bool - default: false - description: - - Override password check - aliases: [ override_pw_length ] - pager_telephonenumber: - default: [] - description: - - List of pager telephone numbers. - aliases: [ pagerTelephonenumber ] - type: list - elements: str - phone: - description: - - List of telephone numbers. - type: list - elements: str - default: [] - postcode: - description: - - Postal code of users business address. - type: str - primary_group: - description: - - Primary group. This must be the group LDAP DN. - - If not specified, it defaults to V(cn=Domain Users,cn=groups,$LDAP_BASE_DN). - aliases: [ primaryGroup ] - type: str - profilepath: - description: - - Windows profile directory - type: str - pwd_change_next_login: - choices: [ '0', '1' ] - description: - - Change password on next login. - aliases: [ pwdChangeNextLogin ] - type: str - room_number: - description: - - Room number of users business address. - aliases: [ roomNumber ] - type: str - samba_privileges: - description: - - "Samba privilege, like allow printer administration, do domain - join." - aliases: [ sambaPrivileges ] - type: list - elements: str - default: [] - samba_user_workstations: - description: - - Allow the authentication only on this Microsoft Windows host. 
- aliases: [ sambaUserWorkstations ] - type: list - elements: str - default: [] - sambahome: - description: - - Windows home path, for example V('\\\\$FQDN\\$USERNAME'). - type: str - scriptpath: - description: - - Windows logon script. - type: str - secretary: - default: [] - description: - - A list of superiors as LDAP DNs. - type: list - elements: str - serviceprovider: - default: [''] - description: - - Enable user for the following service providers. - type: list - elements: str - shell: - default: '/bin/bash' - description: - - Login shell - type: str - street: - description: - - Street of users business address. - type: str - title: - description: - - Title, for example V(Prof.). - type: str - unixhome: - description: - - Unix home directory - - If not specified, it defaults to C(/home/$USERNAME). - type: str - userexpiry: - description: - - Account expiry date, for example V(1999-12-31). - - If not specified, it defaults to the current day plus one year. - type: str - position: - default: '' - description: - - "Define the whole position of users object inside the LDAP tree, - for example V(cn=employee,cn=users,ou=school,dc=example,dc=com)." - type: str - update_password: - default: always - choices: [ always, on_create ] - description: - - "V(always) will update passwords if they differ. - V(on_create) will only set the password for newly created users." - type: str - ou: - default: '' - description: - - "Organizational Unit inside the LDAP Base DN, for example V(school) for - LDAP OU C(ou=school,dc=example,dc=com)." - type: str - subpath: - default: 'cn=users' - description: - - "LDAP subpath inside the organizational unit, for example - V(cn=teachers,cn=users) for LDAP container - C(cn=teachers,cn=users,dc=example,dc=com)." - type: str -''' + - Whether the user is present or not. + type: str + username: + required: true + description: + - User name. + aliases: ['name'] + type: str + firstname: + description: + - First name. Required if O(state=present). 
+ type: str + lastname: + description: + - Last name. Required if O(state=present). + type: str + password: + description: + - Password. Required if O(state=present). + type: str + birthday: + description: + - Birthday. + type: str + city: + description: + - City of users business address. + type: str + country: + description: + - Country of users business address. + type: str + department_number: + description: + - Department number of users business address. + aliases: [departmentNumber] + type: str + description: + description: + - Description (not gecos). + type: str + display_name: + description: + - Display name (not gecos). + aliases: [displayName] + type: str + email: + default: [''] + description: + - A list of e-mail addresses. + type: list + elements: str + employee_number: + description: + - Employee number. + aliases: [employeeNumber] + type: str + employee_type: + description: + - Employee type. + aliases: [employeeType] + type: str + gecos: + description: + - GECOS. + type: str + groups: + default: [] + description: + - 'POSIX groups, the LDAP DNs of the groups will be found with the LDAP filter for each group as $GROUP: V((&(objectClass=posixGroup\)(cn=$GROUP\)\)).' + type: list + elements: str + home_share: + description: + - Home NFS share. Must be a LDAP DN, for example V(cn=home,cn=shares,ou=school,dc=example,dc=com). + aliases: [homeShare] + type: str + home_share_path: + description: + - Path to home NFS share, inside the homeShare. + aliases: [homeSharePath] + type: str + home_telephone_number: + default: [] + description: + - List of private telephone numbers. + aliases: [homeTelephoneNumber] + type: list + elements: str + homedrive: + description: + - Windows home drive, for example V("H:"). + type: str + mail_alternative_address: + default: [] + description: + - List of alternative e-mail addresses. + aliases: [mailAlternativeAddress] + type: list + elements: str + mail_home_server: + description: + - FQDN of mail server. 
+ aliases: [mailHomeServer] + type: str + mail_primary_address: + description: + - Primary e-mail address. + aliases: [mailPrimaryAddress] + type: str + mobile_telephone_number: + default: [] + description: + - Mobile phone number. + aliases: [mobileTelephoneNumber] + type: list + elements: str + organisation: + description: + - Organisation. + aliases: [organization] + type: str + overridePWHistory: + type: bool + default: false + description: + - Override password history. + aliases: [override_pw_history] + overridePWLength: + type: bool + default: false + description: + - Override password check. + aliases: [override_pw_length] + pager_telephonenumber: + default: [] + description: + - List of pager telephone numbers. + aliases: [pagerTelephonenumber] + type: list + elements: str + phone: + description: + - List of telephone numbers. + type: list + elements: str + default: [] + postcode: + description: + - Postal code of users business address. + type: str + primary_group: + description: + - Primary group. This must be the group LDAP DN. + - If not specified, it defaults to V(cn=Domain Users,cn=groups,$LDAP_BASE_DN). + aliases: [primaryGroup] + type: str + profilepath: + description: + - Windows profile directory. + type: str + pwd_change_next_login: + choices: ['0', '1'] + description: + - Change password on next login. + aliases: [pwdChangeNextLogin] + type: str + room_number: + description: + - Room number of users business address. + aliases: [roomNumber] + type: str + samba_privileges: + description: + - Samba privilege, like allow printer administration, do domain join. + aliases: [sambaPrivileges] + type: list + elements: str + default: [] + samba_user_workstations: + description: + - Allow the authentication only on this Microsoft Windows host. + aliases: [sambaUserWorkstations] + type: list + elements: str + default: [] + sambahome: + description: + - Windows home path, for example V('\\\\$FQDN\\$USERNAME'). 
+ type: str + scriptpath: + description: + - Windows logon script. + type: str + secretary: + default: [] + description: + - A list of superiors as LDAP DNs. + type: list + elements: str + serviceprovider: + default: [''] + description: + - Enable user for the following service providers. + type: list + elements: str + shell: + default: '/bin/bash' + description: + - Login shell. + type: str + street: + description: + - Street of users business address. + type: str + title: + description: + - Title, for example V(Prof.). + type: str + unixhome: + description: + - Unix home directory. + - If not specified, it defaults to C(/home/$USERNAME). + type: str + userexpiry: + description: + - Account expiry date, for example V(1999-12-31). + - If not specified, it defaults to the current day plus one year. + type: str + position: + default: '' + description: + - Define the whole position of users object inside the LDAP tree, for example V(cn=employee,cn=users,ou=school,dc=example,dc=com). + type: str + update_password: + default: always + choices: [always, on_create] + description: + - V(always) will update passwords if they differ. V(on_create) will only set the password for newly created users. + type: str + ou: + default: '' + description: + - Organizational Unit inside the LDAP Base DN, for example V(school) for LDAP OU C(ou=school,dc=example,dc=com). + type: str + subpath: + default: 'cn=users' + description: + - LDAP subpath inside the organizational unit, for example V(cn=teachers,cn=users) for LDAP container C(cn=teachers,cn=users,dc=example,dc=com). 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a user on a UCS community.general.udm_user: name: FooBar @@ -325,10 +313,10 @@ EXAMPLES = ''' firstname: Foo lastname: Bar position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' -''' +""" -RETURN = '''# ''' +RETURN = """# """ from datetime import date, timedelta import traceback diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py index fba0ef5fe2..e0d765eeac 100644 --- a/plugins/modules/ufw.py +++ b/plugins/modules/ufw.py @@ -11,26 +11,24 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ufw short_description: Manage firewall with UFW description: - - Manage firewall with UFW. + - Manage firewall with UFW. author: - - Aleksey Ovcharenko (@ovcharenko) - - Jarno Keskikangas (@pyykkis) - - Ahti Kitsik (@ahtik) + - Aleksey Ovcharenko (@ovcharenko) + - Jarno Keskikangas (@pyykkis) + - Ahti Kitsik (@ahtik) notes: - - See C(man ufw) for more examples. - - > - B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, - as firewall rules are meant to be ordered and parallel executions do not guarantee order. - B(Do not use concurrency:) The results are unpredictable and the module may fail silently if you do. + - See C(man ufw) for more examples. + - "B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, as firewall rules are meant + to be ordered and parallel executions do not guarantee order. B(Do not use concurrency:) The results are unpredictable and the module may + fail silently if you do." requirements: - - C(ufw) package + - C(ufw) package extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -44,59 +42,52 @@ options: - V(reloaded) reloads firewall. 
- V(reset) disables and resets firewall to installation defaults. type: str - choices: [ disabled, enabled, reloaded, reset ] + choices: [disabled, enabled, reloaded, reset] default: description: - Change the default policy for incoming or outgoing traffic. type: str - choices: [ allow, deny, reject ] - aliases: [ policy ] + choices: [allow, deny, reject] + aliases: [policy] direction: description: - - Select direction for a rule or default policy command. Mutually - exclusive with O(interface_in) and O(interface_out). + - Select direction for a rule or default policy command. Mutually exclusive with O(interface_in) and O(interface_out). type: str - choices: [ in, incoming, out, outgoing, routed ] + choices: [in, incoming, out, outgoing, routed] logging: description: - Toggles logging. Logged packets use the LOG_KERN syslog facility. type: str - choices: [ 'on', 'off', low, medium, high, full ] + choices: ['on', 'off', low, medium, high, full] insert: description: - Insert the corresponding rule as rule number NUM. - Note that ufw numbers rules starting with 1. - - If O(delete=true) and a value is provided for O(insert), - then O(insert) is ignored. + - If O(delete=true) and a value is provided for O(insert), then O(insert) is ignored. type: int insert_relative_to: description: - Allows to interpret the index in O(insert) relative to a position. - - V(zero) interprets the rule number as an absolute index (i.e. 1 is - the first rule). - - V(first-ipv4) interprets the rule number relative to the index of the - first IPv4 rule, or relative to the position where the first IPv4 rule + - V(zero) interprets the rule number as an absolute index (that is, 1 is the first rule). + - V(first-ipv4) interprets the rule number relative to the index of the first IPv4 rule, or relative to the position where the first IPv4 + rule would be if there is currently none. 
+ - V(last-ipv4) interprets the rule number relative to the index of the last IPv4 rule, or relative to the position where the last IPv4 rule would be if there is currently none. - - V(last-ipv4) interprets the rule number relative to the index of the - last IPv4 rule, or relative to the position where the last IPv4 rule - would be if there is currently none. - - V(first-ipv6) interprets the rule number relative to the index of the - first IPv6 rule, or relative to the position where the first IPv6 rule - would be if there is currently none. - - V(last-ipv6) interprets the rule number relative to the index of the - last IPv6 rule, or relative to the position where the last IPv6 rule + - V(first-ipv6) interprets the rule number relative to the index of the first IPv6 rule, or relative to the position where the first IPv6 + rule would be if there is currently none. + - V(last-ipv6) interprets the rule number relative to the index of the last IPv6 rule, or relative to the position where the last IPv6 rule would be if there is currently none. type: str - choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ] + choices: [first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero] default: zero rule: description: - - Add firewall rule + - Add firewall rule. type: str - choices: [ allow, deny, limit, reject ] + choices: [allow, deny, limit, reject] log: description: - - Log new connections matched to this rule + - Log new connections matched to this rule. type: bool default: false from_ip: @@ -104,7 +95,7 @@ options: - Source IP address. type: str default: any - aliases: [ from, src ] + aliases: [from, src] from_port: description: - Source port. @@ -114,54 +105,49 @@ options: - Destination IP address. type: str default: any - aliases: [ dest, to] + aliases: [dest, to] to_port: description: - Destination port. type: str - aliases: [ port ] + aliases: [port] proto: description: - TCP/IP protocol. 
type: str - choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ] - aliases: [ protocol ] + choices: [any, tcp, udp, ipv6, esp, ah, gre, igmp] + aliases: [protocol] name: description: - Use profile located in C(/etc/ufw/applications.d). type: str - aliases: [ app ] + aliases: [app] delete: description: - Delete rule. - - If O(delete=true) and a value is provided for O(insert), - then O(insert) is ignored. + - If O(delete=true) and a value is provided for O(insert), then O(insert) is ignored. type: bool default: false interface: description: - - Specify interface for the rule. The direction (in or out) used - for the interface depends on the value of O(direction). See - O(interface_in) and O(interface_out) for routed rules that needs - to supply both an input and output interface. Mutually - exclusive with O(interface_in) and O(interface_out). + - Specify interface for the rule. The direction (in or out) used for the interface depends on the value of O(direction). See O(interface_in) + and O(interface_out) for routed rules that needs to supply both an input and output interface. Mutually exclusive with O(interface_in) + and O(interface_out). type: str - aliases: [ if ] + aliases: [if] interface_in: description: - - Specify input interface for the rule. This is mutually - exclusive with O(direction) and O(interface). However, it is - compatible with O(interface_out) for routed rules. + - Specify input interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is compatible with O(interface_out) + for routed rules. type: str - aliases: [ if_in ] + aliases: [if_in] version_added: '0.2.0' interface_out: description: - - Specify output interface for the rule. This is mutually - exclusive with O(direction) and O(interface). However, it is - compatible with O(interface_in) for routed rules. + - Specify output interface for the rule. This is mutually exclusive with O(direction) and O(interface). 
However, it is compatible with O(interface_in) + for routed rules. type: str - aliases: [ if_out ] + aliases: [if_out] version_added: '0.2.0' route: description: @@ -172,9 +158,9 @@ options: description: - Add a comment to the rule. Requires UFW version >=0.35. type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow everything and enable UFW community.general.ufw: state: enabled @@ -299,7 +285,7 @@ EXAMPLES = r''' route: true src: 192.0.2.0/24 dest: 198.51.100.0/24 -''' +""" import re diff --git a/plugins/modules/uptimerobot.py b/plugins/modules/uptimerobot.py index c1894e90a0..ed6b6431f1 100644 --- a/plugins/modules/uptimerobot.py +++ b/plugins/modules/uptimerobot.py @@ -8,44 +8,43 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: uptimerobot short_description: Pause and start Uptime Robot monitoring description: - - This module will let you start and pause Uptime Robot Monitoring + - This module will let you start and pause Uptime Robot Monitoring. author: "Nate Kingsley (@nate-kingsley)" requirements: - - Valid Uptime Robot API Key + - Valid Uptime Robot API Key extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Define whether or not the monitor should be running or paused. - required: true - choices: [ "started", "paused" ] - monitorid: - type: str - description: - - ID of the monitor to check. - required: true - apikey: - type: str - description: - - Uptime Robot API key. - required: true + state: + type: str + description: + - Define whether or not the monitor should be running or paused. + required: true + choices: ["started", "paused"] + monitorid: + type: str + description: + - ID of the monitor to check. 
+ required: true + apikey: + type: str + description: + - Uptime Robot API key. + required: true notes: - - Support for adding and removing monitors and alert contacts has not yet been implemented. -''' + - Support for adding and removing monitors and alert contacts has not yet been implemented. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Pause the monitor with an ID of 12345 community.general.uptimerobot: monitorid: 12345 @@ -57,7 +56,7 @@ EXAMPLES = ''' monitorid: 12345 apikey: 12345-1234512345 state: started -''' +""" import json diff --git a/plugins/modules/urpmi.py b/plugins/modules/urpmi.py index 75c0af90fc..9c08a22c21 100644 --- a/plugins/modules/urpmi.py +++ b/plugins/modules/urpmi.py @@ -11,12 +11,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: urpmi short_description: Urpmi manager description: - - Manages packages with C(urpmi) (such as for Mageia or Mandriva) + - Manages packages with C(urpmi) (such as for Mageia or Mandriva). extends_documentation_fragment: - community.general.attributes attributes: @@ -29,13 +28,13 @@ options: description: - A list of package names to install, upgrade or remove. required: true - aliases: [ package, pkg ] + aliases: [package, pkg] type: list elements: str state: description: - Indicates the desired package state. - choices: [ absent, present, installed, removed ] + choices: [absent, present, installed, removed] default: present type: str update_cache: @@ -50,21 +49,19 @@ options: default: true force: description: - - Assume "yes" is the answer to any question urpmi has to ask. - Corresponds to the C(--force) option for C(urpmi). + - Assume "yes" is the answer to any question urpmi has to ask. Corresponds to the C(--force) option for C(urpmi). type: bool default: true root: description: - - Specifies an alternative install root, relative to which all packages will be installed. 
- Corresponds to the C(--root) option for C(urpmi). - aliases: [ installroot ] + - Specifies an alternative install root, relative to which all packages will be installed. Corresponds to the C(--root) option for C(urpmi). + aliases: [installroot] type: str author: -- Philippe Makowski (@pmakowski) -''' + - Philippe Makowski (@pmakowski) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.urpmi: pkg: foo @@ -85,7 +82,7 @@ EXAMPLES = ''' name: bar state: present update_cache: true -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/usb_facts.py b/plugins/modules/usb_facts.py index 340c71ee54..4f0195bde3 100644 --- a/plugins/modules/usb_facts.py +++ b/plugins/modules/usb_facts.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: usb_facts short_description: Allows listing information about USB devices version_added: 8.5.0 @@ -25,9 +24,9 @@ extends_documentation_fragment: - community.general.attributes.facts_module requirements: - lsusb binary on PATH (usually installed through the package usbutils and preinstalled on many systems) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get information about USB devices community.general.usb_facts: @@ -35,9 +34,9 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "On bus {{ item.bus }} device {{ item.device }} with id {{ item.id }} is {{ item.name }}" loop: "{{ ansible_facts.usb_devices }}" -''' +""" -RETURN = r''' +RETURN = r""" ansible_facts: description: Dictionary containing details of connected USB devices. 
returned: always @@ -69,7 +68,7 @@ ansible_facts: returned: always type: str sample: Linux Foundation 2.0 root hub -''' +""" import re from ansible.module_utils.basic import AnsibleModule From 6cd3f79e19b00a485132a4e6281656b8e5e6e10c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 21:48:06 +1300 Subject: [PATCH 402/482] lookup plugins: use f-strings (#9324) * lookup plugins: use f-strings * add changelog frag * manual change for few occurrences * Update plugins/lookup/dependent.py Co-authored-by: Felix Fontein * adjustment from review * no f-string for you * Update plugins/lookup/dependent.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/9324-fstr-lookup-plugins.yml | 29 +++++++++++ plugins/lookup/bitwarden.py | 2 +- plugins/lookup/chef_databag.py | 4 +- plugins/lookup/collection_version.py | 6 +-- plugins/lookup/consul_kv.py | 4 +- plugins/lookup/credstash.py | 4 +- plugins/lookup/cyberarkpassword.py | 14 +++--- plugins/lookup/dependent.py | 17 +++---- plugins/lookup/dig.py | 20 ++++---- plugins/lookup/dnstxt.py | 2 +- plugins/lookup/dsv.py | 8 ++-- plugins/lookup/etcd.py | 4 +- plugins/lookup/etcd3.py | 8 ++-- plugins/lookup/filetree.py | 10 ++-- plugins/lookup/github_app_access_token.py | 10 ++-- plugins/lookup/hiera.py | 3 +- plugins/lookup/keyring.py | 6 +-- plugins/lookup/lastpass.py | 4 +- plugins/lookup/lmdb_kv.py | 2 +- plugins/lookup/manifold.py | 40 ++++++++-------- plugins/lookup/merge_variables.py | 9 ++-- plugins/lookup/onepassword.py | 24 +++++----- plugins/lookup/onepassword_doc.py | 2 +- plugins/lookup/passwordstore.py | 48 ++++++++----------- plugins/lookup/random_pet.py | 2 +- plugins/lookup/redis.py | 2 +- plugins/lookup/revbitspss.py | 4 +- plugins/lookup/shelvefile.py | 6 +-- plugins/lookup/tss.py | 18 +++---- tests/sanity/ignore-2.15.txt | 1 + tests/sanity/ignore-2.16.txt | 1 + 31 files changed, 165 insertions(+), 149 deletions(-) create 
mode 100644 changelogs/fragments/9324-fstr-lookup-plugins.yml diff --git a/changelogs/fragments/9324-fstr-lookup-plugins.yml b/changelogs/fragments/9324-fstr-lookup-plugins.yml new file mode 100644 index 0000000000..a448ae0d48 --- /dev/null +++ b/changelogs/fragments/9324-fstr-lookup-plugins.yml @@ -0,0 +1,29 @@ +minor_changes: + - bitwarden lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - chef_databag lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - collection_version lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - consul_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - credstash lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - cyberarkpassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - dependent lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - dig lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - dnstxt lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - dsv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). 
+ - etcd lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - etcd3 lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - filetree lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - github_app_access_token lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - hiera lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - keyring lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - lastpass lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - lmdb_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - manifold lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - merge_variables lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - onepassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - onepassword_doc lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). 
+ - passwordstore lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - random_pet lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - redis lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - revbitspss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - shelvefile lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). + - tss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 5e31cc6f89..9a8b5749c2 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -207,7 +207,7 @@ class Bitwarden(object): continue if matches and not field_matches: - raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) + raise AnsibleError(f"field {field} does not exist in {search_value}") return field_matches diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index a116b21e5f..eaa6a1aefa 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -81,11 +81,11 @@ class LookupModule(LookupBase): setattr(self, arg, parsed) except ValueError: raise AnsibleError( - "can't parse arg {0}={1} as string".format(arg, arg_raw) + f"can't parse arg {arg}={arg_raw} as string" ) if args: raise AnsibleError( - "unrecognized arguments to with_sequence: %r" % list(args.keys()) + f"unrecognized arguments to with_sequence: {list(args.keys())!r}" ) def run(self, terms, 
variables=None, **kwargs): diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 0f93c03c26..28a9c34420 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -115,10 +115,10 @@ class LookupModule(LookupBase): for term in terms: if not FQCN_RE.match(term): - raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term)) + raise AnsibleLookupError(f'"{term}" is not a FQCN') try: - collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term)) + collection_pkg = import_module(f'ansible_collections.{term}') except ImportError: # Collection not found result.append(not_found) @@ -127,7 +127,7 @@ class LookupModule(LookupBase): try: data = load_collection_meta(collection_pkg, no_version=no_version) except Exception as exc: - raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc)) + raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}') result.append(data.get('version', no_version)) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 79eb65edb1..cf7226d579 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -171,7 +171,7 @@ class LookupModule(LookupBase): values.append(to_text(results[1]['Value'])) except Exception as e: raise AnsibleError( - "Error locating '%s' in kv store. Error was %s" % (term, e)) + f"Error locating '{term}' in kv store. 
Error was {e}") return values @@ -192,7 +192,7 @@ class LookupModule(LookupBase): if param and len(param) > 0: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name) + raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter") paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index fd284f55c8..0700a5ddcb 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -137,8 +137,8 @@ class LookupModule(LookupBase): try: ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass)) except credstash.ItemNotFound: - raise AnsibleError('Key {0} not found'.format(term)) + raise AnsibleError(f'Key {term} not found') except Exception as e: - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 6a08675b3b..e6701c9fb8 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -105,7 +105,7 @@ class CyberarkPassword: self.extra_parms = [] for key, value in kwargs.items(): self.extra_parms.append('-p') - self.extra_parms.append("%s=%s" % (key, value)) + self.extra_parms.append(f"{key}={value}") if self.appid is None: raise AnsibleError("CyberArk Error: No Application ID specified") @@ -130,8 +130,8 @@ class CyberarkPassword: all_parms = [ CLIPASSWORDSDK_CMD, 'GetPassword', - '-p', 'AppDescs.AppID=%s' % self.appid, - '-p', 'Query=%s' % self.query, + '-p', f'AppDescs.AppID={self.appid}', + '-p', f'Query={self.query}', '-o', self.output, '-d', self.b_delimiter] all_parms.extend(self.extra_parms) @@ -144,7 +144,7 @@ class CyberarkPassword: b_credential = to_bytes(tmp_output) if tmp_error: - raise 
AnsibleError("ERROR => %s " % (tmp_error)) + raise AnsibleError(f"ERROR => {tmp_error} ") if b_credential and b_credential.endswith(b'\n'): b_credential = b_credential[:-1] @@ -164,7 +164,7 @@ class CyberarkPassword: except subprocess.CalledProcessError as e: raise AnsibleError(e.output) except OSError as e: - raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror)) + raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=({to_text(e.errno)}) => {e.strerror} ") return [result_dict] @@ -177,11 +177,11 @@ class LookupModule(LookupBase): """ def run(self, terms, variables=None, **kwargs): - display.vvvv("%s" % terms) + display.vvvv(f"{terms}") if isinstance(terms, list): return_values = [] for term in terms: - display.vvvv("Term: %s" % term) + display.vvvv(f"Term: {term}") cyberark_conn = CyberarkPassword(**term) return_values.append(cyberark_conn.get()) return return_values diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 31634e6e6e..1ec4369b32 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -173,8 +173,7 @@ class LookupModule(LookupBase): values = self.__evaluate(expression, templar, variables=vars) except Exception as e: raise AnsibleLookupError( - 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( - error=e, key=key, item=current)) + f'Caught "{e}" while evaluating {key!r} with item == {current!r}') if isinstance(values, Mapping): for idx, val in sorted(values.items()): @@ -186,8 +185,7 @@ class LookupModule(LookupBase): self.__process(result, terms, index + 1, current, templar, variables) else: raise AnsibleLookupError( - 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( - key=key, item=current, type=type(values))) + f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but 
{type(values)}') def run(self, terms, variables=None, **kwargs): """Generate list.""" @@ -201,16 +199,14 @@ class LookupModule(LookupBase): for index, term in enumerate(terms): if not isinstance(term, Mapping): raise AnsibleLookupError( - 'Parameter {index} must be a dictionary, got {type}'.format( - index=index, type=type(term))) + f'Parameter {index} must be a dictionary, got {type(term)}') if len(term) != 1: raise AnsibleLookupError( - 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( - index=index, count=len(term))) + f'Parameter {index} must be a one-element dictionary, got {len(term)} elements') k, v = list(term.items())[0] if k in vars_so_far: raise AnsibleLookupError( - 'The variable {key!r} appears more than once'.format(key=k)) + f'The variable {k!r} appears more than once') vars_so_far.add(k) if isinstance(v, string_types): data.append((k, v, None)) @@ -218,7 +214,6 @@ class LookupModule(LookupBase): data.append((k, None, v)) else: raise AnsibleLookupError( - 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( - index=index, key=k, type=type(v))) + f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}') self.__process(result, data, 0, {}, templar, variables) return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index aae5ffe834..b03619e15e 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -345,7 +345,7 @@ class LookupModule(LookupBase): try: rdclass = dns.rdataclass.from_text(self.get_option('class')) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {to_native(e)}") myres.retry_servfail = self.get_option('retry_servfail') for t in terms: @@ -363,7 +363,7 @@ class LookupModule(LookupBase): nsaddr = dns.resolver.query(ns)[0].address nameservers.append(nsaddr) except Exception as e: - 
raise AnsibleError("dns lookup NS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup NS: {to_native(e)}") continue if '=' in t: try: @@ -379,7 +379,7 @@ class LookupModule(LookupBase): try: rdclass = dns.rdataclass.from_text(arg) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {to_native(e)}") elif opt == 'retry_servfail': myres.retry_servfail = boolean(arg) elif opt == 'fail_on_error': @@ -400,7 +400,7 @@ class LookupModule(LookupBase): else: domains.append(t) - # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + # print "--- domain = {domain} qtype={qtype} rdclass={rdclass}" if port: myres.port = port @@ -416,7 +416,7 @@ class LookupModule(LookupBase): except dns.exception.SyntaxError: pass except Exception as e: - raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) + raise AnsibleError(f"dns.reversename unhandled exception {to_native(e)}") domains = reversed_domains if len(domains) > 1: @@ -445,25 +445,25 @@ class LookupModule(LookupBase): ret.append(rd) except Exception as err: if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) + raise AnsibleError(f"Lookup failed: {str(err)}") ret.append(str(err)) except dns.resolver.NXDOMAIN as err: if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) + raise AnsibleError(f"Lookup failed: {str(err)}") if not real_empty: ret.append('NXDOMAIN') except dns.resolver.NoAnswer as err: if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) + raise AnsibleError(f"Lookup failed: {str(err)}") if not real_empty: ret.append("") except dns.resolver.Timeout as err: if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) + raise AnsibleError(f"Lookup failed: {str(err)}") if not real_empty: ret.append("") except dns.exception.DNSException as err: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err)) + 
raise AnsibleError(f"dns.resolver unhandled exception {to_native(err)}") return ret diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index 1ce511b849..296d916368 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -108,7 +108,7 @@ class LookupModule(LookupBase): continue string = '' except DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + raise AnsibleError(f"dns.resolver unhandled exception {to_native(e)}") ret.append(''.join(string)) diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 5e26c43af4..eba3e36368 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -135,17 +135,17 @@ class LookupModule(LookupBase): result = [] for term in terms: - display.debug("dsv_lookup term: %s" % term) + display.debug(f"dsv_lookup term: {term}") try: path = term.lstrip("[/:]") if path == "": - raise AnsibleOptionsError("Invalid secret path: %s" % term) + raise AnsibleOptionsError(f"Invalid secret path: {term}") - display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path) + display.vvv(f"DevOps Secrets Vault GET /secrets/{path}") result.append(vault.get_secret_json(path)) except SecretsVaultError as error: raise AnsibleError( - "DevOps Secrets Vault lookup failure: %s" % error.message + f"DevOps Secrets Vault lookup failure: {error.message}" ) return result diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 1dec890b20..1e7dc3c960 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -104,7 +104,7 @@ class Etcd: def __init__(self, url, version, validate_certs): self.url = url self.version = version - self.baseurl = '%s/%s/keys' % (self.url, self.version) + self.baseurl = f'{self.url}/{self.version}/keys' self.validate_certs = validate_certs def _parse_node(self, node): @@ -125,7 +125,7 @@ class Etcd: return path def get(self, key): - url = "%s/%s?recursive=true" % (self.baseurl, key) + url = f"{self.baseurl}/{key}?recursive=true" data = None 
value = {} try: diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index 0bda006e34..3bbeb06bb3 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -168,7 +168,7 @@ def etcd3_client(client_params): etcd = etcd3.client(**client_params) etcd.status() except Exception as exp: - raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp))) + raise AnsibleLookupError(f'Cannot connect to etcd cluster: {to_native(exp)}') return etcd @@ -204,7 +204,7 @@ class LookupModule(LookupBase): cnx_log = dict(client_params) if 'password' in cnx_log: cnx_log['password'] = '' - display.verbose("etcd3 connection parameters: %s" % cnx_log) + display.verbose(f"etcd3 connection parameters: {cnx_log}") # connect to etcd3 server etcd = etcd3_client(client_params) @@ -218,12 +218,12 @@ class LookupModule(LookupBase): if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get_prefix: {to_native(exp)}') else: try: val, meta = etcd.get(term) if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get: {to_native(exp)}') return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index ee7bfe27b7..3036e152c2 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -158,7 +158,7 @@ def file_props(root, path): try: st = os.lstat(abspath) except OSError as e: - display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e)) + display.warning(f'filetree: Error using stat() on path {abspath} ({e})') return None ret = dict(root=root, path=path) @@ -172,7 +172,7 @@ def file_props(root, path): ret['state'] = 'file' ret['src'] = abspath else: - 
display.warning('filetree: Error file type of %s is not supported' % abspath) + display.warning(f'filetree: Error file type of {abspath} is not supported') return None ret['uid'] = st.st_uid @@ -185,7 +185,7 @@ def file_props(root, path): ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) except KeyError: ret['group'] = st.st_gid - ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode)) + ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}' ret['size'] = st.st_size ret['mtime'] = st.st_mtime ret['ctime'] = st.st_ctime @@ -212,7 +212,7 @@ class LookupModule(LookupBase): term_file = os.path.basename(term) dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) path = os.path.join(dwimmed_path, term_file) - display.debug("Walking '{0}'".format(path)) + display.debug(f"Walking '{path}'") for root, dirs, files in os.walk(path, topdown=True): for entry in dirs + files: relpath = os.path.relpath(os.path.join(root, entry), path) @@ -221,7 +221,7 @@ class LookupModule(LookupBase): if relpath not in [entry['path'] for entry in ret]: props = file_props(path, relpath) if props is not None: - display.debug(" found '{0}'".format(os.path.join(path, relpath))) + display.debug(f" found '{os.path.join(path, relpath)}'") ret.append(props) return ret diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index cee635fc0a..73fd09a0a9 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -97,7 +97,7 @@ def read_key(path, private_key=None): with open(path, 'rb') as pem_file: return jwk_from_pem(pem_file.read()) except Exception as e: - raise AnsibleError("Error while parsing key file: {0}".format(e)) + raise AnsibleError(f"Error while parsing key file: {e}") def encode_jwt(app_id, jwk, exp=600): @@ -110,7 +110,7 @@ def encode_jwt(app_id, jwk, exp=600): try: return jwt_instance.encode(payload, jwk, alg='RS256') except Exception as e: - raise AnsibleError("Error while 
encoding jwt: {0}".format(e)) + raise AnsibleError(f"Error while encoding jwt: {e}") def post_request(generated_jwt, installation_id): @@ -124,19 +124,19 @@ def post_request(generated_jwt, installation_id): except HTTPError as e: try: error_body = json.loads(e.read().decode()) - display.vvv("Error returned: {0}".format(error_body)) + display.vvv(f"Error returned: {error_body}") except Exception: error_body = {} if e.code == 404: raise AnsibleError("Github return error. Please confirm your installationd_id value is valid") elif e.code == 401: raise AnsibleError("Github return error. Please confirm your private key is valid") - raise AnsibleError("Unexpected data returned: {0} -- {1}".format(e, error_body)) + raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() try: json_data = json.loads(response_body.decode('utf-8')) except json.decoder.JSONDecodeError as e: - raise AnsibleError("Error while dencoding JSON respone from github: {0}".format(e)) + raise AnsibleError(f"Error while dencoding JSON respone from github: {e}") return json_data.get('token') diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 02669c98dc..8463a8844e 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -79,8 +79,7 @@ class Hiera(object): pargs.extend(hiera_key) - rc, output, err = run_cmd("{0} -c {1} {2}".format( - self.hiera_bin, self.hiera_cfg, hiera_key[0])) + rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}") return to_text(output.strip()) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index a4c914ed1a..65a3301f2d 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -61,13 +61,13 @@ class LookupModule(LookupBase): self.set_options(var_options=variables, direct=kwargs) - display.vvvv(u"keyring: %s" % keyring.get_keyring()) + display.vvvv(f"keyring: {keyring.get_keyring()}") ret = [] for term in terms: (servicename, username) = 
(term.split()[0], term.split()[1]) - display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) + display.vvvv(f"username: {username}, servicename: {servicename} ") password = keyring.get_password(servicename, username) if password is None: - raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) + raise AnsibleError(f"servicename: {servicename} for user {username} not found") ret.append(password.rstrip()) return ret diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 8eb3090b76..70ef8d1414 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -83,9 +83,9 @@ class LPass(object): def get_field(self, key, field): if field in ['username', 'password', 'url', 'notes', 'id', 'name']: - out, err = self._run(self._build_args("show", ["--{0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--{field}", key])) else: - out, err = self._run(self._build_args("show", ["--field={0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--field={field}", key])) return out.strip() diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index a37cff9569..7ff3930bed 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -96,7 +96,7 @@ class LookupModule(LookupBase): try: env = lmdb.open(str(db), readonly=True) except Exception as e: - raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e))) + raise AnsibleError(f"LMDB can't open database {db}: {to_native(e)}") ret = [] if len(terms) == 0: diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py index 049d453e4f..0240aa442b 100644 --- a/plugins/lookup/manifold.py +++ b/plugins/lookup/manifold.py @@ -78,12 +78,14 @@ class ApiError(Exception): class ManifoldApiClient(object): - base_url = 'https://api.{api}.manifold.co/v1/{endpoint}' http_agent = 'python-manifold-ansible-1.0.0' def __init__(self, token): self._token = token + def _make_url(self, 
api, endpoint): + return f'https://api.{api}.manifold.co/v1/{endpoint}' + def request(self, api, endpoint, *args, **kwargs): """ Send a request to API backend and pre-process a response. @@ -98,11 +100,11 @@ class ManifoldApiClient(object): """ default_headers = { - 'Authorization': "Bearer {0}".format(self._token), + 'Authorization': f"Bearer {self._token}", 'Accept': "*/*" # Otherwise server doesn't set content-type header } - url = self.base_url.format(api=api, endpoint=endpoint) + url = self._make_url(api, endpoint) headers = default_headers arg_headers = kwargs.pop('headers', None) @@ -110,23 +112,22 @@ class ManifoldApiClient(object): headers.update(arg_headers) try: - display.vvvv('manifold lookup connecting to {0}'.format(url)) + display.vvvv(f'manifold lookup connecting to {url}') response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) data = response.read() if response.headers.get('content-type') == 'application/json': data = json.loads(data) return data except ValueError: - raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url)) + raise ApiError(f'JSON response can\'t be parsed while requesting {url}:\n{data}') except HTTPError as e: - raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format( - err=str(e), url=url, response=e.read())) + raise ApiError(f'Server returned: {str(e)} while requesting {url}:\n{e.read()}') except URLError as e: - raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e))) + raise ApiError(f'Failed lookup url for {url} : {str(e)}') except SSLValidationError as e: - raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e))) + raise ApiError(f'Error validating the server\'s certificate for {url}: {str(e)}') except ConnectionError as e: - raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e))) + raise ApiError(f'Error connecting to 
{url}: {str(e)}') def get_resources(self, team_id=None, project_id=None, label=None): """ @@ -152,7 +153,7 @@ class ManifoldApiClient(object): query_params['label'] = label if query_params: - endpoint += '?' + urlencode(query_params) + endpoint += f"?{urlencode(query_params)}" return self.request(api, endpoint) @@ -188,7 +189,7 @@ class ManifoldApiClient(object): query_params['label'] = label if query_params: - endpoint += '?' + urlencode(query_params) + endpoint += f"?{urlencode(query_params)}" return self.request(api, endpoint) @@ -200,7 +201,7 @@ class ManifoldApiClient(object): :return: """ api = 'marketplace' - endpoint = 'credentials?' + urlencode({'resource_id': resource_id}) + endpoint = f"credentials?{urlencode({'resource_id': resource_id})}" return self.request(api, endpoint) @@ -229,7 +230,7 @@ class LookupModule(LookupBase): if team: team_data = client.get_teams(team) if len(team_data) == 0: - raise AnsibleError("Team '{0}' does not exist".format(team)) + raise AnsibleError(f"Team '{team}' does not exist") team_id = team_data[0]['id'] else: team_id = None @@ -237,7 +238,7 @@ class LookupModule(LookupBase): if project: project_data = client.get_projects(project) if len(project_data) == 0: - raise AnsibleError("Project '{0}' does not exist".format(project)) + raise AnsibleError(f"Project '{project}' does not exist") project_id = project_data[0]['id'] else: project_id = None @@ -252,7 +253,7 @@ class LookupModule(LookupBase): if labels and len(resources_data) < len(labels): fetched_labels = [r['body']['label'] for r in resources_data] not_found_labels = [label for label in labels if label not in fetched_labels] - raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels))) + raise AnsibleError(f"Resource(s) {', '.join(not_found_labels)} do not exist") credentials = {} cred_map = {} @@ -262,17 +263,14 @@ class LookupModule(LookupBase): for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): label = 
resource['body']['label'] if cred_key in credentials: - display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data " - "with label '{new_label}'".format(cred_key=cred_key, - old_label=cred_map[cred_key], - new_label=label)) + display.warning(f"'{cred_key}' with label '{cred_map[cred_key]}' was replaced by resource data with label '{label}'") credentials[cred_key] = cred_val cred_map[cred_key] = label ret = [credentials] return ret except ApiError as e: - raise AnsibleError('API Error: {0}'.format(str(e))) + raise AnsibleError(f'API Error: {str(e)}') except AnsibleError as e: raise e except Exception: diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index 6287914747..e352524292 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -149,7 +149,7 @@ class LookupModule(LookupBase): ret = [] for term in terms: if not isinstance(term, str): - raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term))) + raise AnsibleError(f"Non-string type '{type(term)}' passed, only 'str' types are allowed!") if not self._groups: # consider only own variables ret.append(self._merge_vars(term, initial_value, variables)) @@ -186,9 +186,9 @@ class LookupModule(LookupBase): return False def _merge_vars(self, search_pattern, initial_value, variables): - display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern)) + display.vvv(f"Merge variables with {self._pattern_type}: {search_pattern}") var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)]) - display.vvv("The following variables will be merged: {0}".format(var_merge_names)) + display.vvv(f"The following variables will be merged: {var_merge_names}") prev_var_type = None result = None @@ -226,8 +226,7 @@ class LookupModule(LookupBase): dest[key] += value else: if (key in dest) and dest[key] != value: - msg = "The key '{0}' with value 
'{1}' will be overwritten with value '{2}' from '{3}.{0}'".format( - key, dest[key], value, ".".join(path)) + msg = f"The key '{key}' with value '{dest[key]}' will be overwritten with value '{value}' from '{'.'.join(path)}.{key}'" if self._override == "error": raise AnsibleError(msg) diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 921cf9acb8..2d9f01e3de 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -140,11 +140,11 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): if missing: prefix = "Unable to sign in to 1Password. Missing required parameter" plural = "" - suffix = ": {params}.".format(params=", ".join(missing)) + suffix = f": {', '.join(missing)}." if len(missing) > 1: plural = "s" - msg = "{prefix}{plural}{suffix}".format(prefix=prefix, plural=plural, suffix=suffix) + msg = f"{prefix}{plural}{suffix}" raise AnsibleLookupError(msg) @abc.abstractmethod @@ -210,12 +210,12 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): try: bin_path = get_bin_path(cls.bin) except ValueError: - raise AnsibleLookupError("Unable to locate '%s' command line tool" % cls.bin) + raise AnsibleLookupError(f"Unable to locate '{cls.bin}' command line tool") try: b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE) except subprocess.CalledProcessError as cpe: - raise AnsibleLookupError("Unable to get the op version: %s" % cpe) + raise AnsibleLookupError(f"Unable to get the op version: {cpe}") return to_text(b_out).strip() @@ -300,7 +300,7 @@ class OnePassCLIv1(OnePassCLIBase): if self.account_id: args.extend(["--account", self.account_id]) elif self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) rc, out, err = self._run(args, ignore_errors=True) @@ -326,7 +326,7 @@ class OnePassCLIv1(OnePassCLIBase): args = [ "signin", - 
"{0}.{1}".format(self.subdomain, self.domain), + f"{self.subdomain}.{self.domain}", to_bytes(self.username), to_bytes(self.secret_key), "--raw", @@ -341,7 +341,7 @@ class OnePassCLIv1(OnePassCLIBase): args.extend(["--account", self.account_id]) if vault is not None: - args += ["--vault={0}".format(vault)] + args += [f"--vault={vault}"] if token is not None: args += [to_bytes("--session=") + token] @@ -512,7 +512,7 @@ class OnePassCLIv2(OnePassCLIBase): args = ["account", "list"] if self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) rc, out, err = self._run(args) @@ -525,7 +525,7 @@ class OnePassCLIv2(OnePassCLIBase): if self.account_id: args.extend(["--account", self.account_id]) elif self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) rc, out, err = self._run(args, ignore_errors=True) @@ -545,7 +545,7 @@ class OnePassCLIv2(OnePassCLIBase): args = [ "account", "add", "--raw", - "--address", "{0}.{1}".format(self.subdomain, self.domain), + "--address", f"{self.subdomain}.{self.domain}", "--email", to_bytes(self.username), "--signin", ] @@ -560,7 +560,7 @@ class OnePassCLIv2(OnePassCLIBase): args.extend(["--account", self.account_id]) if vault is not None: - args += ["--vault={0}".format(vault)] + args += [f"--vault={vault}"] if self.connect_host and self.connect_token: if vault is None: @@ -627,7 +627,7 @@ class OnePass(object): except TypeError as e: raise AnsibleLookupError(e) - raise AnsibleLookupError("op version %s is unsupported" % version) + raise AnsibleLookupError(f"op version {version} is unsupported") def set_token(self): if self._config.config_file_path and os.path.isfile(self._config.config_file_path): diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py index 
789e51c35a..b1728fce89 100644 --- a/plugins/lookup/onepassword_doc.py +++ b/plugins/lookup/onepassword_doc.py @@ -55,7 +55,7 @@ class OnePassCLIv2Doc(OnePassCLIv2): def get_raw(self, item_id, vault=None, token=None): args = ["document", "get", item_id] if vault is not None: - args = [*args, "--vault={0}".format(vault)] + args = [*args, f"--vault={vault}"] if self.service_account_token: if vault is None: diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index f35d268995..584690c175 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -315,7 +315,7 @@ class LookupModule(LookupBase): ) self.realpass = 'pass: the standard unix password manager' in passoutput except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') return self.realpass @@ -332,7 +332,7 @@ class LookupModule(LookupBase): for param in params[1:]: name, value = param.split('=', 1) if name not in self.paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) @@ -344,12 +344,12 @@ class LookupModule(LookupBase): except (ValueError, AssertionError) as e: raise AnsibleError(e) if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: - raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) + raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing") if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: - raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + raise 
AnsibleError(f"{self.paramvals['length']} is not a correct value for length") if self.paramvals['create']: self.paramvals['missing'] = 'create' @@ -364,7 +364,7 @@ class LookupModule(LookupBase): # Set PASSWORD_STORE_DIR self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] elif self.is_real_pass(): - raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) + raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist") # Set PASSWORD_STORE_UMASK if umask is set if self.paramvals.get('umask') is not None: @@ -394,19 +394,19 @@ class LookupModule(LookupBase): name, value = line.split(':', 1) self.passdict[name.strip()] = value.strip() if (self.backend == 'gopass' or - os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg")) + os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg")) or not self.is_real_pass()): # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise) return True except (subprocess.CalledProcessError) as e: # 'not in password store' is the expected error if a password wasn't found if 'not in the password store' not in e.output: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') if self.paramvals['missing'] == 'error': - raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) + raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set') elif self.paramvals['missing'] == 'warn': - display.warning('passwordstore: passname {0} not found'.format(self.passname)) + display.warning(f'passwordstore: passname {self.passname} not found') return False @@ -433,11 +433,11 @@ class LookupModule(LookupBase): msg_lines = [] subkey_exists = False - subkey_line = "{0}: {1}".format(subkey, newpass) + subkey_line = f"{subkey}: {newpass}" oldpass = None for line in self.passoutput: - if line.startswith("{0}: ".format(subkey)): + if line.startswith(f"{subkey}: "): oldpass = self.passdict[subkey] line = subkey_line subkey_exists = True @@ -449,9 +449,7 @@ class LookupModule(LookupBase): if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: msg_lines.append( - "lookup_pass: old subkey '{0}' password was {1} (Updated on {2})\n".format( - subkey, oldpass, datetime - ) + f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n" ) msg = os.linesep.join(msg_lines) @@ -464,12 +462,12 @@ class LookupModule(LookupBase): if self.paramvals['preserve'] and self.passoutput[1:]: msg += '\n'.join(self.passoutput[1:]) + '\n' if self.paramvals['timestamp'] and self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + msg += f"lookup_pass: old password was {self.password} (Updated on {datetime})\n" try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') return newpass def generate_password(self): @@ -480,17 +478,17 @@ class LookupModule(LookupBase): subkey = self.paramvals["subkey"] if subkey != "password": - msg = "\n\n{0}: {1}".format(subkey, newpass) + msg = f"\n\n{subkey}: {newpass}" else: msg = newpass if self.paramvals['timestamp']: - msg += '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + msg += f"\nlookup_pass: First generated by ansible on {datetime}\n" try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') return newpass @@ -505,16 +503,12 @@ class LookupModule(LookupBase): else: if self.paramvals["missing_subkey"] == "error": raise AnsibleError( - "passwordstore: subkey {0} for passname {1} not found and missing_subkey=error is set".format( - self.paramvals["subkey"], self.passname - ) + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found and missing_subkey=error is set" ) if self.paramvals["missing_subkey"] == "warn": display.warning( - "passwordstore: subkey {0} for passname {1} not found".format( - self.paramvals["subkey"], self.passname - ) + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found" ) return None @@ -524,7 +518,7 @@ class LookupModule(LookupBase): if self.get_option('lock') == type: tmpdir = os.environ.get('TMPDIR', '/tmp') user = os.environ.get('USER') - lockfile = os.path.join(tmpdir, '.{0}.passwordstore.lock'.format(user)) + lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock') with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield @@ -538,7 +532,7 @@ class LookupModule(LookupBase): self.locked = None timeout = 
self.get_option('locktimeout') if not re.match('^[0-9]+[smh]$', timeout): - raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout)) + raise AnsibleError(f"{timeout} is not a correct value for locktimeout") unit_to_seconds = {"s": 1, "m": 60, "h": 3600} self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 71a62cbca0..77f1c34a51 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -95,6 +95,6 @@ class LookupModule(LookupBase): values = petname.Generate(words=words, separator=separator, letters=length) if prefix: - values = "%s%s%s" % (prefix, separator, values) + values = f"{prefix}{separator}{values}" return [values] diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 17cbf120e9..5c669a7f23 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -116,5 +116,5 @@ class LookupModule(LookupBase): ret.append(to_text(res)) except Exception as e: # connection failed or key not found - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py index e4118e89eb..89c19cf23c 100644 --- a/plugins/lookup/revbitspss.py +++ b/plugins/lookup/revbitspss.py @@ -100,8 +100,8 @@ class LookupModule(LookupBase): result = [] for term in terms: try: - display.vvv("Secret Server lookup of Secret with ID %s" % term) + display.vvv(f"Secret Server lookup of Secret with ID {term}") result.append({term: secret_server.get_pam_secret(term)}) except Exception as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") return result diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 70d18338e9..4d965372fb 100644 --- 
a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -71,7 +71,7 @@ class LookupModule(LookupBase): for param in params: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') paramvals[name] = value except (ValueError, AssertionError) as e: @@ -86,11 +86,11 @@ class LookupModule(LookupBase): if shelvefile: res = self.read_shelve(shelvefile, key) if res is None: - raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile)) + raise AnsibleError(f"Key {key} not found in shelve file {shelvefile}") # Convert the value read to string ret.append(to_text(res)) break else: - raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file']) + raise AnsibleError(f"Could not locate shelve file in lookup: {paramvals['file']}") return ret diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index f2d79ed168..344fa01678 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -306,14 +306,14 @@ class TSSClient(object): return TSSClientV0(**server_parameters) def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path): - display.debug("tss_lookup term: %s" % term) + display.debug(f"tss_lookup term: {term}") secret_id = self._term_to_secret_id(term) if secret_id == 0 and secret_path: fetch_secret_by_path = True - display.vvv(u"Secret Server lookup of Secret with path %s" % secret_path) + display.vvv(f"Secret Server lookup of Secret with path {secret_path}") else: fetch_secret_by_path = False - display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id) + display.vvv(f"Secret Server lookup of Secret with ID {secret_id}") if fetch_file_attachments: if fetch_secret_by_path: @@ -325,12 +325,12 @@ class TSSClient(object): if i['isFile']: try: file_content = i['itemValue'].content - with open(os.path.join(file_download_path, str(obj['id']) + "_" + i['slug']), "wb") as 
f: + with open(os.path.join(file_download_path, f"{str(obj['id'])}_{i['slug']}"), "wb") as f: f.write(file_content) except ValueError: - raise AnsibleOptionsError("Failed to download {0}".format(str(i['slug']))) + raise AnsibleOptionsError(f"Failed to download {str(i['slug'])}") except AttributeError: - display.warning("Could not read file content for {0}".format(str(i['slug']))) + display.warning(f"Could not read file content for {str(i['slug'])}") finally: i['itemValue'] = "*** Not Valid For Display ***" else: @@ -343,9 +343,9 @@ class TSSClient(object): return self._client.get_secret_json(secret_id) def get_secret_ids_by_folderid(self, term): - display.debug("tss_lookup term: %s" % term) + display.debug(f"tss_lookup term: {term}") folder_id = self._term_to_folder_id(term) - display.vvv(u"Secret Server lookup of Secret id's with Folder ID %d" % folder_id) + display.vvv(f"Secret Server lookup of Secret id's with Folder ID {folder_id}") return self._client.get_secret_ids_by_folderid(folder_id) @@ -447,4 +447,4 @@ class LookupModule(LookupBase): for term in terms ] except SecretServerError as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt index 667c6cee4d..f624f28e01 100644 --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -1,4 +1,5 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen +plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index f6b058ec69..665101becf 100644 --- 
a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -1,3 +1,4 @@ +plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt' From 4b23e5ecffa0b12d97797d9deb007a635d56818d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 25 Dec 2024 21:48:22 +1300 Subject: [PATCH 403/482] s[o-y]*: normalize docs (#9351) * s[o-y]*: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/spectrum_model_attrs.py --------- Co-authored-by: Felix Fontein --- plugins/modules/solaris_zone.py | 38 +- plugins/modules/sorcery.py | 145 ++-- plugins/modules/spectrum_device.py | 152 ++--- plugins/modules/spectrum_model_attrs.py | 211 +++--- plugins/modules/spotinst_aws_elastigroup.py | 722 ++++++++------------ plugins/modules/ss_3par_cpg.py | 30 +- plugins/modules/ssh_config.py | 43 +- plugins/modules/stacki_host.py | 26 +- plugins/modules/statsd.py | 11 +- plugins/modules/statusio_maintenance.py | 223 +++--- plugins/modules/sudoers.py | 11 +- plugins/modules/supervisorctl.py | 35 +- plugins/modules/svc.py | 89 ++- plugins/modules/svr4pkg.py | 24 +- plugins/modules/swdepot.py | 53 +- plugins/modules/swupd.py | 34 +- plugins/modules/syslogger.py | 96 ++- plugins/modules/syspatch.py | 44 +- plugins/modules/sysrc.py | 98 ++- plugins/modules/sysupgrade.py | 83 ++- 20 files changed, 994 insertions(+), 1174 deletions(-) diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py index d9f44589dc..9f8f774cbe 100644 --- a/plugins/modules/solaris_zone.py +++ b/plugins/modules/solaris_zone.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- 
+DOCUMENTATION = r""" module: solaris_zone short_description: Manage Solaris zones description: @@ -31,16 +30,15 @@ options: description: - V(present), configure and install the zone. - V(installed), synonym for V(present). - - V(running), if the zone already exists, boot it, otherwise, configure and install - the zone first, then boot it. + - V(running), if the zone already exists, boot it, otherwise, configure and install the zone first, then boot it. - V(started), synonym for V(running). - V(stopped), shutdown a zone. - V(absent), destroy the zone. - V(configured), configure the ready so that it's to be attached. - V(attached), attach a zone, but do not boot it. - - V(detached), shutdown and detach a zone + - V(detached), shutdown and detach a zone. type: str - choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ] + choices: [absent, attached, configured, detached, installed, present, running, started, stopped] default: present name: description: @@ -53,8 +51,7 @@ options: required: true path: description: - - The path where the zone will be created. This is required when the zone is created, but not - used otherwise. + - The path where the zone will be created. This is required when the zone is created, but not used otherwise. type: str sparse: description: @@ -63,32 +60,29 @@ options: default: false root_password: description: - - The password hash for the root account. If not specified, the zone's root account - will not have a password. + - The password hash for the root account. If not specified, the zone's root account will not have a password. type: str config: description: - - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options - and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g. - "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"' + - The C(zonecfg) configuration commands for this zone. 
See zonecfg(1M) for the valid options and syntax. Typically this is a list of options + separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end). type: str default: '' create_options: description: - - 'Extra options to the zonecfg(1M) create command.' + - Extra options to the zonecfg(1M) create command. type: str default: '' install_options: description: - - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, - use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"' + - Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, use this to specify the profile XML file, for example + O(install_options=-c sc_profile.xml). type: str default: '' attach_options: description: - - 'Extra options to the zoneadm attach command. For example, this can be used to specify - whether a minimum or full update of packages is required and if any packages need to - be deleted. For valid values, see zoneadm(1M)' + - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update of packages is + required and if any packages need to be deleted. For valid values, see zoneadm(1M). type: str default: '' timeout: @@ -96,9 +90,9 @@ options: - Timeout, in seconds, for zone to boot. 
type: int default: 600 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create and install a zone, but don't boot it community.general.solaris_zone: name: zone1 @@ -149,7 +143,7 @@ EXAMPLES = ''' name: zone1 state: attached attach_options: -u -''' +""" import os import platform diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py index a525bd9ac8..9ad3d30f3b 100644 --- a/plugins/modules/sorcery.py +++ b/plugins/modules/sorcery.py @@ -10,93 +10,86 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sorcery short_description: Package manager for Source Mage GNU/Linux description: - - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain + - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain. author: "Vlad Glagolev (@vaygr)" notes: - - When all three components are selected, the update goes by the sequence -- - Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. - - Grimoire handling is supported since community.general 7.3.0. + - When all three components are selected, the update goes by the sequence -- Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. + - Grimoire handling is supported since community.general 7.3.0. requirements: - - bash + - bash extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the spell or grimoire. - - Multiple names can be given, separated by commas. - - Special value V(*) in conjunction with states V(latest) or - V(rebuild) will update or rebuild the whole system respectively - - The alias O(grimoire) was added in community.general 7.3.0. - aliases: ["spell", "grimoire"] - type: list - elements: str + name: + description: + - Name of the spell or grimoire. 
+ - Multiple names can be given, separated by commas. + - Special value V(*) in conjunction with states V(latest) or V(rebuild) will update or rebuild the whole system respectively. + - The alias O(grimoire) was added in community.general 7.3.0. + aliases: ["spell", "grimoire"] + type: list + elements: str - repository: - description: - - Repository location. - - If specified, O(name) represents grimoire(s) instead of spell(s). - - Special value V(*) will pull grimoire from the official location. - - Only single item in O(name) in conjunction with V(*) can be used. - - O(state=absent) must be used with a special value V(*). - type: str - version_added: 7.3.0 + repository: + description: + - Repository location. + - If specified, O(name) represents grimoire(s) instead of spell(s). + - Special value V(*) will pull grimoire from the official location. + - Only single item in O(name) in conjunction with V(*) can be used. + - O(state=absent) must be used with a special value V(*). + type: str + version_added: 7.3.0 - state: - description: - - Whether to cast, dispel or rebuild a package. - - State V(cast) is an equivalent of V(present), not V(latest). - - State V(rebuild) implies cast of all specified spells, not only - those existed before. - choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] - default: "present" - type: str + state: + description: + - Whether to cast, dispel or rebuild a package. + - State V(cast) is an equivalent of V(present), not V(latest). + - State V(rebuild) implies cast of all specified spells, not only those existed before. + choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] + default: "present" + type: str - depends: - description: - - Comma-separated list of _optional_ dependencies to build a spell - (or make sure it is built) with; use V(+)/V(-) in front of dependency - to turn it on/off (V(+) is optional though). 
- - This option is ignored if O(name) parameter is equal to V(*) or - contains more than one spell. - - Providers must be supplied in the form recognized by Sorcery, - for example 'V(openssl(SSL\))'. - type: str + depends: + description: + - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in front of dependency + to turn it on/off (V(+) is optional though). + - This option is ignored if O(name) parameter is equal to V(*) or contains more than one spell. + - Providers must be supplied in the form recognized by Sorcery, for example 'V(openssl(SSL\))'. + type: str - update: - description: - - Whether or not to update sorcery scripts at the very first stage. - type: bool - default: false + update: + description: + - Whether or not to update sorcery scripts at the very first stage. + type: bool + default: false - update_cache: - description: - - Whether or not to update grimoire collection before casting spells. - type: bool - default: false - aliases: ["update_codex"] + update_cache: + description: + - Whether or not to update grimoire collection before casting spells. + type: bool + default: false + aliases: ["update_codex"] - cache_valid_time: - description: - - Time in seconds to invalidate grimoire collection on update. - - Especially useful for SCM and rsync grimoires. - - Makes sense only in pair with O(update_cache). - type: int - default: 0 -''' + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update. + - Especially useful for SCM and rsync grimoires. + - Makes sense only in pair with O(update_cache). 
+ type: int + default: 0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure spell foo is installed community.general.sorcery: spell: foo @@ -131,9 +124,9 @@ EXAMPLES = ''' depends: "{{ item.depends | default(None) }}" state: present loop: - - { spell: 'vifm', depends: '+file,-gtk+2' } - - { spell: 'fwknop', depends: 'gpgme' } - - { spell: 'pv,tnftp,tor' } + - {spell: 'vifm', depends: '+file,-gtk+2'} + - {spell: 'fwknop', depends: 'gpgme'} + - {spell: 'pv,tnftp,tor'} - name: Install the latest version of spell foo using regular glossary community.general.sorcery: @@ -184,11 +177,11 @@ EXAMPLES = ''' - name: Update only Sorcery itself community.general.sorcery: update: true -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py index 7cf7cf9150..bb9761d37d 100644 --- a/plugins/modules/spectrum_device.py +++ b/plugins/modules/spectrum_device.py @@ -9,88 +9,86 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: spectrum_device short_description: Creates/deletes devices in CA Spectrum description: - - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). - - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 + - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). + - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1. author: "Renato Orgito (@orgito)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - device: - type: str - aliases: [ host, name ] - required: true - description: - - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. 
- community: - type: str - description: - - SNMP community used for device discovery. - - Required when O(state=present). - required: true - landscape: - type: str - required: true - description: - - Landscape handle of the SpectroServer to which add or remove the device. - state: - type: str - description: - - On V(present) creates the device when it does not exist. - - On V(absent) removes the device when it exists. - choices: ['present', 'absent'] - default: 'present' - url: - type: str - aliases: [ oneclick_url ] - required: true - description: - - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). - url_username: - type: str - aliases: [ oneclick_user ] - required: true - description: - - Oneclick user name. - url_password: - type: str - aliases: [ oneclick_password ] - required: true - description: - - Oneclick user password. - use_proxy: - description: - - if V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. - default: true - type: bool - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: true - type: bool - agentport: - type: int - required: false - description: - - UDP port used for SNMP discovery. - default: 161 + device: + type: str + aliases: [host, name] + required: true + description: + - IP address of the device. + - If a hostname is given, it will be resolved to the IP address. + community: + type: str + description: + - SNMP community used for device discovery. + - Required when O(state=present). + required: true + landscape: + type: str + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + type: str + description: + - On V(present) creates the device when it does not exist. + - On V(absent) removes the device when it exists. 
+ choices: ['present', 'absent'] + default: 'present' + url: + type: str + aliases: [oneclick_url] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). + url_username: + type: str + aliases: [oneclick_user] + required: true + description: + - Oneclick user name. + url_password: + type: str + aliases: [oneclick_password] + required: true + description: + - Oneclick user password. + use_proxy: + description: + - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + type: bool + validate_certs: + description: + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + default: true + type: bool + agentport: + type: int + required: false + description: + - UDP port used for SNMP discovery. + default: 161 notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. -''' + - The devices will be created inside the I(Universe) container of the specified landscape. + - All the operations will be performed only on the specified landscape. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add device to CA Spectrum local_action: module: spectrum_device @@ -113,15 +111,15 @@ EXAMPLES = ''' oneclick_password: password use_proxy: false state: absent -''' +""" -RETURN = ''' +RETURN = r""" device: - description: device data when state = present + description: Device data when O(state=present). 
returned: success type: dict sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} -''' +""" from socket import gethostbyname, gaierror import xml.etree.ElementTree as ET diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py index 43983a11a5..3057f04c15 100644 --- a/plugins/modules/spectrum_model_attrs.py +++ b/plugins/modules/spectrum_model_attrs.py @@ -9,110 +9,108 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: spectrum_model_attrs short_description: Enforce a model's attributes in CA Spectrum description: - - This module can be used to enforce a model's attributes in CA Spectrum. + - This module can be used to enforce a model's attributes in CA Spectrum. version_added: 2.5.0 author: - - Tyler Gates (@tgates81) + - Tyler Gates (@tgates81) notes: - - Tested on CA Spectrum version 10.4.2.0.189. - - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - url: - description: - - URL of OneClick server. - type: str - required: true - url_username: - description: - - OneClick username. - type: str - required: true - aliases: [username] - url_password: - description: - - OneClick password. - type: str - required: true - aliases: [password] - use_proxy: - description: - - if V(false), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. 
- default: true - required: false - type: bool - name: - description: - - Model name. - type: str - required: true - type: - description: - - Model type. - type: str - required: true - validate_certs: - description: - - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is no - man-in-the-middle attack happening. - type: bool - default: true - required: false - attributes: - description: - - A list of attribute names and values to enforce. - - All values and parameters are case sensitive and must be provided as strings only. - required: true - type: list - elements: dict - suboptions: - name: - description: - - Attribute name OR hex ID. - - 'Currently defined names are:' - - ' C(App_Manufacturer) (C(0x230683))' - - ' C(CollectionsModelNameString) (C(0x12adb))' - - ' C(Condition) (C(0x1000a))' - - ' C(Criticality) (C(0x1290c))' - - ' C(DeviceType) (C(0x23000e))' - - ' C(isManaged) (C(0x1295d))' - - ' C(Model_Class) (C(0x11ee8))' - - ' C(Model_Handle) (C(0x129fa))' - - ' C(Model_Name) (C(0x1006e))' - - ' C(Modeltype_Handle) (C(0x10001))' - - ' C(Modeltype_Name) (C(0x10000))' - - ' C(Network_Address) (C(0x12d7f))' - - ' C(Notes) (C(0x11564))' - - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' - - ' C(TopologyModelNameString) (C(0x129e7))' - - ' C(sysDescr) (C(0x10052))' - - ' C(sysName) (C(0x10b5b))' - - ' C(Vendor_Name) (C(0x11570))' - - ' C(Description) (C(0x230017))' - - Hex IDs are the direct identifiers in Spectrum and will always work. - - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' - type: str - required: true - value: - description: - - Attribute value. Empty strings should be V("") or V(null). - type: str - required: true -''' + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. 
+ type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. + type: str + required: true + aliases: [password] + use_proxy: + description: + - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + required: false + type: bool + name: + description: + - Model name. + type: str + required: true + type: + description: + - Model type. + type: str + required: true + validate_certs: + description: + - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is + no man-in-the-middle attack happening. + type: bool + default: true + required: false + attributes: + description: + - A list of attribute names and values to enforce. + - All values and parameters are case sensitive and must be provided as strings only. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Attribute name OR hex ID. + - 'Currently defined names are:' + - C(App_Manufacturer) (C(0x230683)); + - C(CollectionsModelNameString) (C(0x12adb)); + - C(Condition) (C(0x1000a)); + - C(Criticality) (C(0x1290c)); + - C(DeviceType) (C(0x23000e)); + - C(isManaged) (C(0x1295d)); + - C(Model_Class) (C(0x11ee8)); + - C(Model_Handle) (C(0x129fa)); + - C(Model_Name) (C(0x1006e)); + - C(Modeltype_Handle) (C(0x10001)); + - C(Modeltype_Name) (C(0x10000)); + - C(Network_Address) (C(0x12d7f)); + - C(Notes) (C(0x11564)); + - C(ServiceDesk_Asset_ID) (C(0x12db9)); + - C(TopologyModelNameString) (C(0x129e7)); + - C(sysDescr) (C(0x10052)); + - C(sysName) (C(0x10b5b)); + - C(Vendor_Name) (C(0x11570)); + - C(Description) (C(0x230017)). + - Hex IDs are the direct identifiers in Spectrum and will always work. + - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' + type: str + required: true + value: + description: + - Attribute value. 
Empty strings should be V("") or V(null). + type: str + required: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enforce maintenance mode for modelxyz01 with a note about why community.general.spectrum_model_attrs: url: "http://oneclick.url.com" @@ -128,23 +126,20 @@ EXAMPLES = r''' value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" delegate_to: localhost register: spectrum_model_attrs_status -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: Informational message on the job result. - type: str - returned: always - sample: 'Success' + description: Informational message on the job result. + type: str + returned: always + sample: 'Success' changed_attrs: - description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. - type: dict - returned: always - sample: { - "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", - "isManaged": "true" - } -''' + description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. 
+ type: dict + returned: always + sample: {"Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", "isManaged": "true"} +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py index 45556f621c..d07761ee05 100644 --- a/plugins/modules/spotinst_aws_elastigroup.py +++ b/plugins/modules/spotinst_aws_elastigroup.py @@ -5,19 +5,15 @@ # SPDX-License-Identifier: GPL-3.0-or-later from __future__ import (absolute_import, division, print_function) -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: spotinst_aws_elastigroup short_description: Create, update or delete Spotinst AWS Elastigroups author: Spotinst (@talzur) description: - - Can create, update, or delete Spotinst AWS Elastigroups - Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. - You will have to have a credentials file in this location - /.spotinst/credentials - The credentials file must contain a row that looks like this - token = - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- + - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, so no additional modules + are necessary for handling the launch configuration. You will have to have a credentials file in this location - C($HOME/.spotinst/credentials). + The credentials file must contain a row that looks like this C(token = ). + - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). requirements: - spotinst_sdk >= 1.0.38 extends_documentation_fragment: @@ -38,62 +34,43 @@ options: account_id: description: - Optional parameter that allows to set an account-id inside the module configuration. - By default this is retrieved from the credentials path. 
+ - By default this is retrieved from the credentials path. type: str token: description: - A Personal API Access Token issued by Spotinst. - - >- - When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN), or from the credentials path. + - 'When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN), or from the credentials + path.' type: str availability_vs_cost: description: - The strategy orientation. - - "The choices available are: V(availabilityOriented), V(costOriented), V(balanced)." + - 'The choices available are: V(availabilityOriented), V(costOriented), V(balanced).' required: true type: str availability_zones: description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - name (String), - subnet_id (String), - placement_group_name (String), + - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed + are name (String), subnet_id (String), placement_group_name (String),. 
required: true type: list elements: dict block_device_mappings: description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; - You can specify virtual devices and EBS volumes.; - '[{"key":"value", "key":"value"}]'; - keys allowed are - device_name (List of Strings), - virtual_name (String), - no_device (String), - ebs (Object, expects the following keys- - delete_on_termination(Boolean), - encrypted(Boolean), - iops (Integer), - snapshot_id(Integer), - volume_type(String), - volume_size(Integer)) + - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", + "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), no_device (String), ebs (Object, expects the + following keys- delete_on_termination(Boolean), encrypted(Boolean), iops (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)). type: list elements: dict chef: description: - - The Chef integration configuration.; - Expects the following keys - chef_server (String), - organization (String), - user (String), - pem_key (String), - chef_version (String) + - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), pem_key + (String), chef_version (String). type: dict draining_timeout: @@ -103,36 +80,30 @@ options: ebs_optimized: description: - - Enable EBS optimization for supported instances which are not enabled by default.; - Note - additional charges will be applied. + - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. 
type: bool ebs_volume_pool: description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - volume_ids (List of Strings), - device_name (String) + - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed + are - volume_ids (List of Strings), device_name (String). type: list elements: dict ecs: description: - - The ECS integration configuration.; - Expects the following key - - cluster_name (String) + - The ECS integration configuration.; Expects the following key - cluster_name (String). type: dict elastic_ips: description: - - List of ElasticIps Allocation Ids (example V(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation Ids (example V(eipalloc-9d4e16f8)) to associate to the group instances. type: list elements: str fallback_to_od: description: - - In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup will launch an On-demand instance instead. type: bool health_check_grace_period: @@ -149,159 +120,129 @@ options: health_check_type: description: - The service to use for the health check. - - "The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2)." + - 'The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2).' type: str iam_role_name: description: - - The instance profile iamRole name - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole name. + - Only use O(iam_role_arn) or O(iam_role_name). type: str iam_role_arn: description: - - The instance profile iamRole arn - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole arn. + - Only use O(iam_role_arn) or O(iam_role_name). type: str id: description: - - The group id if it already exists and you want to update, or delete it. 
- This will not work unless the uniqueness_by field is set to id. + - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. type: str image_id: description: - - The image Id used to launch the instance.; - In case of conflict between Instance type and image type, an error will be returned + - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned. required: true type: str key_pair: description: - - Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances. type: str kubernetes: description: - - The Kubernetes integration configuration. - Expects the following keys - - api_server (String), - token (String) + - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String). type: dict lifetime_period: description: - - Lifetime period + - Lifetime period. type: int load_balancers: description: - - List of classic ELB names + - List of classic ELB names. type: list elements: str max_size: description: - - The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to. required: true type: int mesosphere: description: - - The Mesosphere integration configuration. - Expects the following key - - api_server (String) + - The Mesosphere integration configuration. Expects the following key - api_server (String). type: dict min_size: description: - - The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to. required: true type: int monitoring: description: - - Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled. 
type: str name: description: - - Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted. required: true type: str network_interfaces: description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - description (String), - device_index (Integer), - secondary_private_ip_address_count (Integer), - associate_public_ip_address (Boolean), - delete_on_termination (Boolean), - groups (List of Strings), - network_interface_id (String), - private_ip_address (String), - subnet_id (String), - associate_ipv6_address (Boolean), - private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) + - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description + (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address (Boolean), delete_on_termination + (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address (String), subnet_id (String), associate_ipv6_address + (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)). type: list elements: dict on_demand_count: description: - - Required if risk is not set - - Number of on demand instances to launch. All other instances will be spot instances.; - Either set this parameter or the risk parameter + - Required if risk is not set. + - Number of on demand instances to launch. All other instances will be spot instances.; Either set this parameter or the risk parameter. type: int on_demand_instance_type: description: - - On-demand instance type that will be provisioned + - On-demand instance type that will be provisioned. 
type: str opsworks: description: - - The elastigroup OpsWorks integration configuration.; - Expects the following key - - layer_id (String) + - The elastigroup OpsWorks integration configuration.; Expects the following key - layer_id (String). type: dict persistence: description: - - The Stateful elastigroup configuration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) + - The Stateful elastigroup configuration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices + (Boolean), should_persist_private_ip (Boolean). type: dict product: description: - Operation system type. - - "Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC))." + - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC)).' required: true type: str rancher: description: - - The Rancher integration configuration.; - Expects the following keys - - version (String), - access_key (String), - secret_key (String), - master_host (String) + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key (String), master_host + (String). type: dict right_scale: description: - - The Rightscale integration configuration.; - Expects the following keys - - account_id (String), - refresh_token (String) + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String). type: dict risk: @@ -311,59 +252,42 @@ options: roll_config: description: - - Roll configuration.; - If you would like the group to roll after updating, please use this feature. - Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) + - Roll configuration. 
+ - If you would like the group to roll after updating, please use this feature. + - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, Optional). type: dict scheduled_tasks: description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - adjustment (Integer), - scale_target_capacity (Integer), - scale_min_capacity (Integer), - scale_max_capacity (Integer), - adjustment_percentage (Integer), - batch_size_percentage (Integer), - cron_expression (String), - frequency (String), - grace_period (Integer), - task_type (String, required), - is_enabled (Boolean) + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]). + - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity (Integer), + adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), grace_period (Integer), + task_type (String, required), is_enabled (Boolean).' type: list elements: dict security_group_ids: description: - - One or more security group IDs. ; - In case of update it will override the existing Security Group with the new given array + - One or more security group IDs. + - In case of update it will override the existing Security Group with the new given array. required: true type: list elements: str shutdown_script: description: - - The Base64-encoded shutdown script that executes prior to instance termination. - Encode before setting. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. 
type: str signals: description: - - A list of hash/dictionaries of signals to configure in the elastigroup; - keys allowed are - - name (String, required), - timeout (Integer) + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer). type: list elements: dict spin_up_time: description: - - Spin up time, in seconds, for the instance + - Spin up time, in seconds, for the instance. type: int spot_instance_types: @@ -378,108 +302,70 @@ options: - present - absent description: - - Create or delete the elastigroup + - Create or delete the elastigroup. default: present type: str tags: description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value). type: list elements: dict target: description: - - The number of instances to launch + - The number of instances to launch. required: true type: int target_group_arns: description: - - List of target group arns instances should be registered to + - List of target group arns instances should be registered to. type: list elements: str tenancy: description: - Dedicated vs shared tenancy. - - "The available choices are: V(default), V(dedicated)." + - 'The available choices are: V(default), V(dedicated).' type: str terminate_at_end_of_billing_hour: description: - - Terminate at the end of billing hour + - Terminate at the end of billing hour. type: bool unit: description: - The capacity unit to launch instances by. - - "The available choices are: V(instance), V(weight)." + - 'The available choices are: V(instance), V(weight).' 
type: str up_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required) - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - min_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are + - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions (List of Objects, Keys allowed + are name (String, required) and value (String)), statistic (String, required) evaluation_periods (String, required), period (String, required), + threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type (String, + required), adjustment (String), min_target_capacity (String), target (String), maximum (String), minimum (String). 
type: list elements: dict down_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), - statistic (String, required), - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - max_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are + - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions ((List of Objects), Keys allowed + are name (String, required) and value (String)), statistic (String, required), evaluation_periods (String, required), period (String, + required), threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type + (String, required), adjustment (String), max_target_capacity (String), target (String), maximum (String), minimum (String). 
type: list elements: dict target_tracking_policies: description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - source (String, required), - metric_name (String, required), - statistic (String, required), - unit (String, required), - cooldown (String, required), - target (String, required) + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed + are - policy_name (String, required), namespace (String, required), source (String, required), metric_name (String, required), statistic + (String, required), unit (String, required), cooldown (String, required), target (String, required). type: list elements: dict @@ -488,8 +374,8 @@ options: - id - name description: - - If your group names are not unique, you may use this feature to update or delete a specific group. - Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you + must set a group_id in order to update or delete a group, otherwise a group will be created. default: name type: str @@ -500,20 +386,19 @@ options: utilize_reserved_instances: description: - - In case of any available Reserved Instances, - Elastigroup will utilize your reservations before purchasing Spot instances. + - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin + - Whether or not the elastigroup creation / update actions should wait for the instances to spin. 
type: bool default: false wait_timeout: description: - - How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. + - How long the module should wait for instances before failing the action. + - Only works if O(wait_for_instances=true). type: int do_not_update: @@ -538,40 +423,39 @@ options: description: - Placeholder parameter for future implementation of Elastic Beanstalk configurations. type: dict - -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" # Basic configuration YAML example - hosts: localhost tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -581,39 +465,39 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - 
OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -628,43 +512,43 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - 
spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -678,36 +562,36 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - 
device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -718,34 +602,34 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -756,70 +640,68 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - 
Creator: ValueOfCreatorTag - - Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: true - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: true + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id register: result - ansible.builtin.debug: var=result -''' +""" -RETURN = ''' ---- +RETURN = r""" instances: - description: List of active elastigroup instances and their details. - returned: success - type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", - "status": "fulfilled" - } - ] + description: List of active elastigroup instances and their details. 
+ returned: success + type: dict + sample: [ + { + "spotInstanceRequestId": "sir-regs25zp", + "instanceId": "i-09640ad8678234c", + "instanceType": "m4.large", + "product": "Linux/UNIX", + "availabilityZone": "us-west-2b", + "privateIp": "180.0.2.244", + "createdAt": "2017-07-17T12:46:18.000Z", + "status": "fulfilled" + } + ] group_id: - description: Created / Updated group's ID. - returned: success - type: str - sample: "sig-12345" - -''' + description: Created / Updated group's ID. + returned: success + type: str + sample: "sig-12345" +""" HAS_SPOTINST_SDK = False __metaclass__ = type diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py index 32c1cd443f..c9c9b4bd90 100644 --- a/plugins/modules/ss_3par_cpg.py +++ b/plugins/modules/ss_3par_cpg.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" short_description: Manage HPE StoreServ 3PAR CPG author: - Farhan Nomani (@farhan7500) @@ -43,18 +42,15 @@ options: type: str growth_increment: description: - - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage - created on each auto-grow operation. + - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage created on each auto-grow operation. type: str growth_limit: description: - - Specifies that the autogrow operation is limited to the specified - storage amount that sets the growth limit(in MiB, GiB or TiB). + - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit(in MiB, GiB or TiB). type: str growth_warning: description: - - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded - results in a warning alert. + - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. 
type: str high_availability: choices: @@ -62,8 +58,7 @@ options: - CAGE - MAG description: - - Specifies that the layout must support the failure of one port pair, - one cage, or one magazine. + - Specifies that the layout must support the failure of one port pair, one cage, or one magazine. type: str raid_type: choices: @@ -92,13 +87,12 @@ options: type: bool default: false extends_documentation_fragment: -- community.general.hpe3par -- community.general.attributes - -''' + - community.general.hpe3par + - community.general.attributes +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create CPG sample_cpg community.general.ss_3par_cpg: storage_system_ip: 10.10.10.1 @@ -124,10 +118,10 @@ EXAMPLES = r''' state: absent cpg_name: sample_cpg secure: false -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py index 1f8098b25f..582d7c127e 100644 --- a/plugins/modules/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -11,18 +11,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ssh_config short_description: Manage SSH config for user version_added: '2.0.0' description: - - Configures SSH hosts with special C(IdentityFile)s and hostnames. + - Configures SSH hosts with special C(IdentityFile)s and hostnames. author: - - Björn Andersson (@gaqzi) - - Abhijeet Kasurde (@Akasurde) + - Björn Andersson (@gaqzi) + - Abhijeet Kasurde (@Akasurde) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -33,7 +32,7 @@ options: description: - Whether a host entry should exist or not. 
default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str user: description: @@ -50,8 +49,7 @@ options: host: description: - The endpoint this configuration is valid for. - - Can be an actual address on the internet or an alias that will - connect to the value of O(hostname). + - Can be an actual address on the internet or an alias that will connect to the value of O(hostname). required: true type: str hostname: @@ -68,17 +66,14 @@ options: type: str identity_file: description: - - The path to an identity file (SSH private key) that will be used - when connecting to this host. + - The path to an identity file (SSH private key) that will be used when connecting to this host. - File need to exist and have mode V(0600) to be valid. type: path identities_only: description: - - Specifies that SSH should only use the configured authentication - identity and certificate files (either the default files, or - those explicitly configured in the C(ssh_config) files or passed on - the ssh command-line), even if ssh-agent or a PKCS11Provider or - SecurityKeyProvider offers more identities. + - Specifies that SSH should only use the configured authentication identity and certificate files (either the default files, or those explicitly + configured in the C(ssh_config) files or passed on the ssh command-line), even if ssh-agent or a PKCS11Provider or SecurityKeyProvider + offers more identities. type: bool version_added: 8.2.0 user_known_hosts_file: @@ -89,7 +84,7 @@ options: description: - Whether to strictly check the host key when doing connections to the remote host. - The value V(accept-new) is supported since community.general 8.6.0. - choices: [ 'yes', 'no', 'ask', 'accept-new' ] + choices: ['yes', 'no', 'ask', 'accept-new'] type: str proxycommand: description: @@ -126,7 +121,7 @@ options: controlmaster: description: - Sets the C(ControlMaster) option. 
- choices: [ 'yes', 'no', 'ask', 'auto', 'autoask' ] + choices: ['yes', 'no', 'ask', 'auto', 'autoask'] type: str version_added: 8.1.0 controlpath: @@ -145,10 +140,10 @@ options: type: str version_added: 10.1.0 requirements: -- paramiko -''' + - paramiko +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a host in the configuration community.general.ssh_config: user: akasurde @@ -163,9 +158,9 @@ EXAMPLES = r''' ssh_config_file: "{{ ssh_config_test }}" host: "example.com" state: absent -''' +""" -RETURN = r''' +RETURN = r""" hosts_added: description: A list of host added. returned: success @@ -201,7 +196,7 @@ hosts_change_diff: } } ] -''' +""" import os diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py index 57440a24d0..4b37d256cb 100644 --- a/plugins/modules/stacki_host.py +++ b/plugins/modules/stacki_host.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: stacki_host short_description: Add or remove host to stacki front-end description: @@ -35,8 +34,7 @@ options: type: str stacki_password: description: - - Password for authenticating with Stacki API, but if not - specified, the environment variable E(stacki_password) is used instead. + - Password for authenticating with Stacki API, but if not specified, the environment variable E(stacki_password) is used instead. required: true type: str stacki_endpoint: @@ -68,7 +66,7 @@ options: description: - Set value to the desired state for the specified host. 
type: str - choices: [ absent, present ] + choices: [absent, present] default: present appliance: description: @@ -96,10 +94,10 @@ options: type: str default: private author: -- Hugh Ma (@bbyhuy) -''' + - Hugh Ma (@bbyhuy) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a host named test-1 community.general.stacki_host: name: test-1 @@ -117,27 +115,27 @@ EXAMPLES = ''' stacki_password: pwd stacki_endpoint: url state: absent -''' +""" -RETURN = ''' +RETURN = r""" changed: - description: response to whether or not the api call completed successfully + description: Response to whether or not the api call completed successfully. returned: always type: bool sample: true stdout: - description: the set of responses from the commands + description: The set of responses from the commands. returned: always type: list sample: ['...', '...'] stdout_lines: - description: the value of stdout split into a list + description: The value of stdout split into a list. returned: always type: list sample: [['...', '...'], ['...'], ['...']] -''' +""" import json diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py index 8bc0f0b187..dcb3f0252e 100644 --- a/plugins/modules/statsd.py +++ b/plugins/modules/statsd.py @@ -7,15 +7,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: statsd short_description: Send metrics to StatsD version_added: 2.1.0 description: - The C(statsd) module sends metrics to StatsD. - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/). - - Supported metric types are V(counter) and V(gauge). - Currently unupported metric types are V(timer), V(set), and V(gaugedelta). + - Supported metric types are V(counter) and V(gauge). Currently unupported metric types are V(timer), V(set), and V(gaugedelta). 
author: "Mark Mercado (@mamercad)" requirements: - statsd @@ -80,9 +79,9 @@ options: default: false description: - If the metric is of type V(gauge), change the value by O(delta). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Increment the metric my_counter by 1 community.general.statsd: host: localhost @@ -100,7 +99,7 @@ EXAMPLES = ''' metric: my_gauge metric_type: gauge value: 7 -''' +""" from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py index 0a96d0fb41..6f17523e25 100644 --- a/plugins/modules/statusio_maintenance.py +++ b/plugins/modules/statusio_maintenance.py @@ -9,127 +9,123 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: statusio_maintenance short_description: Create maintenance windows for your status.io dashboard description: - - Creates a maintenance window for status.io - - Deletes a maintenance window for status.io + - Creates or deletes a maintenance window for status.io. notes: - - You can use the apiary API url (http://docs.statusio.apiary.io/) to - capture API traffic - - Use start_date and start_time with minutes to set future maintenance window + - You can use the apiary API URL (U(http://docs.statusio.apiary.io/)) to capture API traffic. + - Use start_date and start_time with minutes to set future maintenance window. 
author: Benjamin Copeland (@bhcopeland) extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - title: - type: str - description: - - A descriptive title for the maintenance window - default: "A new maintenance window" - desc: - type: str - description: - - Message describing the maintenance window - default: "Created by Ansible" - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent"] - api_id: - type: str - description: - - Your unique API ID from status.io - required: true - api_key: - type: str - description: - - Your unique API Key from status.io - required: true - statuspage: - type: str - description: - - Your unique StatusPage ID from status.io - required: true - url: - type: str - description: - - Status.io API URL. A private apiary can be used instead. - default: "https://api.status.io" - components: - type: list - elements: str - description: - - The given name of your component (server name) - aliases: ['component'] - containers: - type: list - elements: str - description: - - The given name of your container (data center) - aliases: ['container'] - all_infrastructure_affected: - description: - - If it affects all components and containers - type: bool - default: false - automation: - description: - - Automatically start and end the maintenance window - type: bool - default: false - maintenance_notify_now: - description: - - Notify subscribers now - type: bool - default: false - maintenance_notify_72_hr: - description: - - Notify subscribers 72 hours before maintenance start time - type: bool - default: false - maintenance_notify_24_hr: - description: - - Notify subscribers 24 hours before maintenance start time - type: bool - default: false - maintenance_notify_1_hr: - description: - - Notify subscribers 1 hour before maintenance start time 
- type: bool - default: false - maintenance_id: - type: str - description: - - The maintenance id number when deleting a maintenance window - minutes: - type: int - description: - - The length of time in UTC that the maintenance will run - (starting from playbook runtime) - default: 10 - start_date: - type: str - description: - - Date maintenance is expected to start (Month/Day/Year) (UTC) - - End Date is worked out from start_date + minutes - start_time: - type: str - description: - - Time maintenance is expected to start (Hour:Minutes) (UTC) - - End Time is worked out from start_time + minutes -''' + title: + type: str + description: + - A descriptive title for the maintenance window. + default: "A new maintenance window" + desc: + type: str + description: + - Message describing the maintenance window. + default: "Created by Ansible" + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent"] + api_id: + type: str + description: + - Your unique API ID from status.io. + required: true + api_key: + type: str + description: + - Your unique API Key from status.io. + required: true + statuspage: + type: str + description: + - Your unique StatusPage ID from status.io. + required: true + url: + type: str + description: + - Status.io API URL. A private apiary can be used instead. + default: "https://api.status.io" + components: + type: list + elements: str + description: + - The given name of your component (server name). + aliases: ['component'] + containers: + type: list + elements: str + description: + - The given name of your container (data center). + aliases: ['container'] + all_infrastructure_affected: + description: + - If it affects all components and containers. + type: bool + default: false + automation: + description: + - Automatically start and end the maintenance window. + type: bool + default: false + maintenance_notify_now: + description: + - Notify subscribers now. 
+ type: bool + default: false + maintenance_notify_72_hr: + description: + - Notify subscribers 72 hours before maintenance start time. + type: bool + default: false + maintenance_notify_24_hr: + description: + - Notify subscribers 24 hours before maintenance start time. + type: bool + default: false + maintenance_notify_1_hr: + description: + - Notify subscribers 1 hour before maintenance start time. + type: bool + default: false + maintenance_id: + type: str + description: + - The maintenance id number when deleting a maintenance window. + minutes: + type: int + description: + - The length of time in UTC that the maintenance will run (starting from playbook runtime). + default: 10 + start_date: + type: str + description: + - Date maintenance is expected to start (Month/Day/Year) (UTC). + - End Date is worked out from O(start_date) + O(minutes). + start_time: + type: str + description: + - Time maintenance is expected to start (Hour:Minutes) (UTC). + - End Time is worked out from O(start_time) + O(minutes). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance community.general.statusio_maintenance: title: Router Upgrade from ansible @@ -176,10 +172,9 @@ EXAMPLES = ''' api_id: api_id api_key: api_key state: absent - -''' +""" # TODO: Add RETURN documentation. -RETURN = ''' # ''' +RETURN = """ # """ import datetime import json diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py index a392b4adfa..2735ce72cd 100644 --- a/plugins/modules/sudoers.py +++ b/plugins/modules/sudoers.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sudoers short_description: Manage sudoers files version_added: "4.3.0" @@ -98,11 +97,11 @@ options: - If V(required), visudo must be available to validate the sudoers rule. 
type: str default: detect - choices: [ absent, detect, required ] + choices: [absent, detect, required] version_added: 5.2.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Allow the backup user to sudo /usr/local/bin/backup community.general.sudoers: name: allow-backup @@ -158,7 +157,7 @@ EXAMPLES = ''' user: alice commands: /usr/bin/less noexec: true -''' +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py index e8d9c89a65..9e6900e234 100644 --- a/plugins/modules/supervisorctl.py +++ b/plugins/modules/supervisorctl.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: supervisorctl short_description: Manage the state of a program or group of programs running via supervisord description: - - Manage the state of a program or group of programs running via supervisord + - Manage the state of a program or group of programs running via supervisord. extends_documentation_fragment: - community.general.attributes attributes: @@ -33,29 +32,29 @@ options: config: type: path description: - - The supervisor configuration file path + - The supervisor configuration file path. server_url: type: str description: - - URL on which supervisord server is listening + - URL on which supervisord server is listening. username: type: str description: - - username to use for authentication + - Username to use for authentication. password: type: str description: - - password to use for authentication + - Password to use for authentication. state: type: str description: - The desired state of program/group. 
required: true - choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ] + choices: ["present", "started", "stopped", "restarted", "absent", "signalled"] stop_before_removing: type: bool description: - - Use O(stop_before_removing=true) to stop the program/group before removing it + - Use O(stop_before_removing=true) to stop the program/group before removing it. required: false default: false version_added: 7.5.0 @@ -66,19 +65,19 @@ options: supervisorctl_path: type: path description: - - path to supervisorctl executable + - Path to C(supervisorctl) executable. notes: - When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). - - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. - If the program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true). -requirements: [ "supervisorctl" ] + - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. If the + program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true). 
+requirements: ["supervisorctl"] author: - - "Matt Wright (@mattupstate)" - - "Aaron Wang (@inetfuture) " -''' + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Manage the state of program to be in started state community.general.supervisorctl: name: my_app @@ -113,7 +112,7 @@ EXAMPLES = ''' community.general.supervisorctl: name: all state: restarted -''' +""" import os from ansible.module_utils.basic import AnsibleModule, is_executable diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py index b327ddfd60..17667c2cc8 100644 --- a/plugins/modules/svc.py +++ b/plugins/modules/svc.py @@ -8,60 +8,55 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svc author: -- Brian Coca (@bcoca) + - Brian Coca (@bcoca) short_description: Manage daemontools services description: - - Controls daemontools services on remote hosts using the svc utility. + - Controls daemontools services on remote hosts using the svc utility. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the service to manage. - type: str - required: true - state: - description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. V(restarted) will always bounce the - svc (svc -t) and V(killed) will always bounce the svc (svc -k). - V(reloaded) will send a sigusr1 (svc -1). - V(once) will run a normally downed svc once (svc -o), not really - an idempotent operation. - type: str - choices: [ killed, once, reloaded, restarted, started, stopped ] - downed: - description: - - Should a 'down' file exist or not, if it exists it disables auto startup. - Defaults to no. Downed does not imply stopped. 
- type: bool - enabled: - description: - - Whether the service is enabled or not, if disabled it also implies stopped. - Take note that a service can be enabled and downed (no auto restart). - type: bool - service_dir: - description: - - Directory svscan watches for services - type: str - default: /service - service_src: - description: - - Directory where services are defined, the source of symlinks to service_dir. - type: str - default: /etc/service -''' + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the svc (svc + -t) and V(killed) will always bounce the svc (svc -k). V(reloaded) will send a sigusr1 (svc -1). V(once) will run a normally downed svc + once (svc -o), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + downed: + description: + - Should a 'down' file exist or not, if it exists it disables auto startup. Defaults to no. Downed does not imply stopped. + type: bool + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. Take note that a service can be enabled and downed (no auto + restart). + type: bool + service_dir: + description: + - Directory svscan watches for services. + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). 
+ type: str + default: /etc/service +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start svc dnscache, if not running community.general.svc: name: dnscache @@ -92,7 +87,7 @@ EXAMPLES = ''' name: dnscache state: reloaded service_dir: /var/service -''' +""" import os import re diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py index 56ded66e62..ac919d749b 100644 --- a/plugins/modules/svr4pkg.py +++ b/plugins/modules/svr4pkg.py @@ -10,19 +10,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svr4pkg short_description: Manage Solaris SVR4 packages description: - - Manages SVR4 packages on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Note that this is a very basic packaging system. It will not enforce - dependencies on install or remove. + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove. author: "Boyd Adamson (@brontitall)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -47,8 +44,9 @@ options: src: description: - Specifies the location to install the package from. Required when O(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there. + - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http://server/mypkgs.pkg)." 
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them + there. type: str proxy: description: @@ -73,9 +71,9 @@ options: required: false type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package from an already copied file community.general.svr4pkg: name: CSWcommon @@ -106,7 +104,7 @@ EXAMPLES = ''' name: FIREFOX state: absent category: true -''' +""" import os diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py index 9ba1b02b30..628c63f810 100644 --- a/plugins/modules/swdepot.py +++ b/plugins/modules/swdepot.py @@ -12,41 +12,40 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swdepot short_description: Manage packages with swdepot package manager (HP-UX) description: - - Will install, upgrade and remove packages with swdepot package manager (HP-UX) + - Will install, upgrade and remove packages with swdepot package manager (HP-UX). notes: [] author: "Raul Melo (@melodous)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - package name. - aliases: [pkg] - required: true - type: str - state: - description: - - whether to install (V(present), V(latest)), or remove (V(absent)) a package. - required: true - choices: [ 'present', 'latest', 'absent'] - type: str - depot: - description: - - The source repository from which install or upgrade a package. - type: str -''' + name: + description: + - Package name. + aliases: [pkg] + required: true + type: str + state: + description: + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. 
+ required: true + choices: ['present', 'latest', 'absent'] + type: str + depot: + description: + - The source repository from which install or upgrade a package. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package community.general.swdepot: name: unzip-6.0 @@ -63,7 +62,7 @@ EXAMPLES = ''' community.general.swdepot: name: unzip state: absent -''' +""" import re diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py index 16738c8cb8..9b13a4e658 100644 --- a/plugins/modules/swupd.py +++ b/plugins/modules/swupd.py @@ -10,13 +10,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swupd short_description: Manages updates and bundles in ClearLinux systems description: - - Manages updates and bundles with the swupd bundle manager, which is used by the - Clear Linux Project for Intel Architecture. + - Manages updates and bundles with the swupd bundle manager, which is used by the Clear Linux Project for Intel Architecture. author: Alberto Murillo (@albertomurillo) extends_documentation_fragment: - community.general.attributes @@ -28,19 +26,16 @@ attributes: options: contenturl: description: - - URL pointing to the contents of available bundles. - If not specified, the contents are retrieved from clearlinux.org. + - URL pointing to the contents of available bundles. If not specified, the contents are retrieved from clearlinux.org. type: str format: description: - - The format suffix for version file downloads. For example [1,2,3,staging,etc]. - If not specified, the default format is used. + - The format suffix for version file downloads. For example [1,2,3,staging,etc]. If not specified, the default format is used. type: str manifest: description: - - The manifest contains information about the bundles at certain version of the OS. 
- Specify a Manifest version to verify against that version or leave unspecified to - verify against the current version. + - The manifest contains information about the bundles at certain version of the OS. Specify a Manifest version to verify against that version + or leave unspecified to verify against the current version. aliases: [release, version] type: int name: @@ -50,8 +45,7 @@ options: type: str state: description: - - Indicates the desired (I)bundle state. V(present) ensures the bundle - is installed while V(absent) ensures the (I)bundle is not installed. + - Indicates the desired (I)bundle state. V(present) ensures the bundle is installed while V(absent) ensures the (I)bundle is not installed. default: present choices: [present, absent] type: str @@ -73,9 +67,9 @@ options: description: - URL for version string download. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the OS to the latest version community.general.swupd: update: true @@ -98,18 +92,18 @@ EXAMPLES = ''' community.general.swupd: verify: true manifest: 12920 -''' +""" -RETURN = ''' +RETURN = r""" stdout: - description: stdout of swupd + description: C(stdout) of C(swupd). returned: always type: str stderr: - description: stderr of swupd + description: C(stderr) of C(swupd). returned: always type: str -''' +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py index 3a7abf4fbe..ca9aebfcfc 100644 --- a/plugins/modules/syslogger.py +++ b/plugins/modules/syslogger.py @@ -7,55 +7,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syslogger short_description: Log messages in the syslog description: - - Uses syslog to add log entries to the host. + - Uses syslog to add log entries to the host. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - msg: - type: str - description: - - This is the message to place in syslog. - required: true - priority: - type: str - description: - - Set the log priority. - choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] - default: "info" - facility: - type: str - description: - - Set the log facility. - choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", - "uucp", "cron", "syslog", "local0", "local1", "local2", - "local3", "local4", "local5", "local6", "local7" ] - default: "daemon" - log_pid: - description: - - Log the PID in brackets. - type: bool - default: false - ident: - description: - - Specify the name of application name which is sending the log to syslog. - type: str - default: 'ansible_syslogger' - version_added: '0.2.0' + msg: + type: str + description: + - This is the message to place in syslog. + required: true + priority: + type: str + description: + - Set the log priority. + choices: ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] + default: "info" + facility: + type: str + description: + - Set the log facility. + choices: ["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2", "local3", "local4", + "local5", "local6", "local7"] + default: "daemon" + log_pid: + description: + - Log the PID in brackets. + type: bool + default: false + ident: + description: + - Specify the name of application name which is sending the log to syslog. 
+ type: str + default: 'ansible_syslogger' + version_added: '0.2.0' author: - - Tim Rightnour (@garbled1) -''' + - Tim Rightnour (@garbled1) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Simple Usage community.general.syslogger: msg: "I will end up as daemon.info" @@ -72,36 +70,36 @@ EXAMPLES = r''' ident: "MyApp" msg: "I want to believe" priority: "alert" -''' +""" -RETURN = r''' +RETURN = r""" ident: - description: Name of application sending the message to log + description: Name of application sending the message to log. returned: always type: str sample: "ansible_syslogger" version_added: '0.2.0' priority: - description: Priority level + description: Priority level. returned: always type: str sample: "daemon" facility: - description: Syslog facility + description: Syslog facility. returned: always type: str sample: "info" log_pid: - description: Log PID status + description: Log PID status. returned: always type: bool sample: true msg: - description: Message sent to syslog + description: Message sent to syslog. returned: always type: str sample: "Hello from Ansible" -''' +""" import syslog import traceback diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py index c90ef0d227..3cedc220f7 100644 --- a/plugins/modules/syspatch.py +++ b/plugins/modules/syspatch.py @@ -8,37 +8,35 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syspatch short_description: Manage OpenBSD system patches description: - - "Manage OpenBSD system patches using syspatch." - + - Manage OpenBSD system patches using syspatch. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - revert: - description: - - Revert system patches. 
- type: str - choices: [ all, one ] + revert: + description: + - Revert system patches. + type: str + choices: [all, one] author: - - Andrew Klaus (@precurse) -''' + - Andrew Klaus (@precurse) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Apply all available system patches community.general.syspatch: @@ -58,20 +56,20 @@ EXAMPLES = ''' - name: Reboot if patch requires it ansible.builtin.reboot: when: syspatch.reboot_needed -''' +""" -RETURN = r''' +RETURN = r""" rc: - description: The command return code (0 means success) + description: The command return code (0 means success). returned: always type: int stdout: - description: syspatch standard output. + description: C(syspatch) standard output. returned: always type: str sample: "001_rip6cksum" stderr: - description: syspatch standard error. + description: C(syspatch) standard error. returned: always type: str sample: "syspatch: need root privileges" @@ -80,7 +78,7 @@ reboot_needed: returned: always type: bool sample: true -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py index 6780975d4f..d93bccd620 100644 --- a/plugins/modules/sysrc.py +++ b/plugins/modules/sysrc.py @@ -9,64 +9,62 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - David Lundgren (@dlundgren) + - David Lundgren (@dlundgren) module: sysrc short_description: Manage FreeBSD using sysrc version_added: '2.0.0' description: - - Manages C(/etc/rc.conf) for FreeBSD. + - Manages C(/etc/rc.conf) for FreeBSD. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of variable in C(/etc/rc.conf) to manage. 
- type: str - required: true - value: - description: - - The value to set when O(state=present). - - The value to add when O(state=value_present). - - The value to remove when O(state=value_absent). - type: str - state: - description: - - Use V(present) to add the variable. - - Use V(absent) to remove the variable. - - Use V(value_present) to add the value to the existing variable. - - Use V(value_absent) to remove the value from the existing variable. - type: str - default: "present" - choices: [ absent, present, value_present, value_absent ] - path: - description: - - Path to file to use instead of V(/etc/rc.conf). - type: str - default: "/etc/rc.conf" - delim: - description: - - Delimiter to be used instead of V(" ") (space). - - Only used when O(state=value_present) or O(state=value_absent). - default: " " - type: str - jail: - description: - - Name or ID of the jail to operate on. - type: str + name: + description: + - Name of variable in C(/etc/rc.conf) to manage. + type: str + required: true + value: + description: + - The value to set when O(state=present). + - The value to add when O(state=value_present). + - The value to remove when O(state=value_absent). + type: str + state: + description: + - Use V(present) to add the variable. + - Use V(absent) to remove the variable. + - Use V(value_present) to add the value to the existing variable. + - Use V(value_absent) to remove the value from the existing variable. + type: str + default: "present" + choices: [absent, present, value_present, value_absent] + path: + description: + - Path to file to use instead of V(/etc/rc.conf). + type: str + default: "/etc/rc.conf" + delim: + description: + - Delimiter to be used instead of V(" ") (space). + - Only used when O(state=value_present) or O(state=value_absent). + default: " " + type: str + jail: + description: + - Name or ID of the jail to operate on. + type: str notes: - The O(name) cannot contain periods as sysrc does not support OID style names. 
-''' +""" -EXAMPLES = r''' ---- +EXAMPLES = r""" # enable mysql in the /etc/rc.conf - name: Configure mysql pid file community.general.sysrc: @@ -94,15 +92,15 @@ EXAMPLES = r''' name: nginx_enable value: "YES" jail: testjail -''' +""" -RETURN = r''' +RETURN = r""" changed: description: Return changed for sysrc actions. returned: always type: bool sample: true -''' +""" from ansible.module_utils.basic import AnsibleModule import re diff --git a/plugins/modules/sysupgrade.py b/plugins/modules/sysupgrade.py index 639fa345ad..26232cd98d 100644 --- a/plugins/modules/sysupgrade.py +++ b/plugins/modules/sysupgrade.py @@ -8,54 +8,53 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sysupgrade short_description: Manage OpenBSD system upgrades version_added: 1.1.0 description: - - Manage OpenBSD system upgrades using sysupgrade. + - Manage OpenBSD system upgrades using C(sysupgrade). extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - snapshot: - description: - - Apply the latest snapshot. - - Otherwise release will be applied. - default: false - type: bool - force: - description: - - Force upgrade (for snapshots only). - default: false - type: bool - keep_files: - description: - - Keep the files under /home/_sysupgrade. - - By default, the files will be deleted after the upgrade. - default: false - type: bool - fetch_only: - description: - - Fetch and verify files and create /bsd.upgrade but do not reboot. - - Set to V(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. - default: true - type: bool - installurl: - description: - - OpenBSD mirror top-level URL for fetching an upgrade. 
- - By default, the mirror URL is pulled from /etc/installurl. - type: str + snapshot: + description: + - Apply the latest snapshot. + - Otherwise release will be applied. + default: false + type: bool + force: + description: + - Force upgrade (for snapshots only). + default: false + type: bool + keep_files: + description: + - Keep the files under C(/home/_sysupgrade). + - By default, the files will be deleted after the upgrade. + default: false + type: bool + fetch_only: + description: + - Fetch and verify files and create C(/bsd.upgrade) but do not reboot. + - Set to V(false) if you want C(sysupgrade) to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. + default: true + type: bool + installurl: + description: + - OpenBSD mirror top-level URL for fetching an upgrade. + - By default, the mirror URL is pulled from C(/etc/installurl). + type: str author: - - Andrew Klaus (@precurse) -''' + - Andrew Klaus (@precurse) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Upgrade to latest release community.general.sysupgrade: register: sysupgrade @@ -77,9 +76,9 @@ EXAMPLES = r''' community.general.sysupgrade: fetch_only: false ignore_errors: true -''' +""" -RETURN = r''' +RETURN = r""" rc: description: The command return code (0 means success). 
returned: always @@ -93,7 +92,7 @@ stderr: returned: always type: str sample: "sysupgrade: need root privileges" -''' +""" from ansible.module_utils.basic import AnsibleModule From 825e0ee377f3bfaea9528d9e76c6041cc9ab193c Mon Sep 17 00:00:00 2001 From: Eric <8869330+erichoog@users.noreply.github.com> Date: Wed, 25 Dec 2024 03:48:56 -0500 Subject: [PATCH 404/482] zypper: add simple_errors option - fixes #8416 (#9270) * zypper: add simple_errors option -fixes #8416 * Fix style issues * Apply suggestions from code review Co-authored-by: Felix Fontein * Fix indentation * Add changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * Updated as per code review recommendations * Fix whitespace * Add quiet option, fix logic, update changelog * Fix trailing whitespace * Update plugins/modules/zypper.py Co-authored-by: Felix Fontein * Add suggested improvements --------- Co-authored-by: Eric Hoogeveen Co-authored-by: Felix Fontein --- .../9270-zypper-add-simple_errors.yaml | 3 ++ plugins/modules/zypper.py | 48 ++++++++++++++++++- 2 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/9270-zypper-add-simple_errors.yaml diff --git a/changelogs/fragments/9270-zypper-add-simple_errors.yaml b/changelogs/fragments/9270-zypper-add-simple_errors.yaml new file mode 100644 index 0000000000..9fcdf3403c --- /dev/null +++ b/changelogs/fragments/9270-zypper-add-simple_errors.yaml @@ -0,0 +1,3 @@ +minor_changes: + - zypper - add ``simple_errors`` option (https://github.com/ansible-collections/community.general/pull/9270). + - zypper - add ``quiet`` option (https://github.com/ansible-collections/community.general/pull/9270). \ No newline at end of file diff --git a/plugins/modules/zypper.py b/plugins/modules/zypper.py index ac5b6657ba..5bc6c766a0 100644 --- a/plugins/modules/zypper.py +++ b/plugins/modules/zypper.py @@ -142,6 +142,20 @@ options: description: - Adds C(--clean-deps) option to I(zypper) remove command. 
version_added: '4.6.0' + simple_errors: + type: bool + required: false + default: false + description: + - When set to V(true), provide a simplified error output (parses only the C() tag text in the XML output). + version_added: '10.2.0' + quiet: + type: bool + required: false + default: true + description: + - Adds C(--quiet) option to I(zypper) install/update command. + version_added: '10.2.0' notes: - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option. @@ -190,6 +204,13 @@ EXAMPLES = r""" name: '*' state: latest +- name: Install latest packages but dump error messages in a simplified format + community.general.zypper: + name: '*' + state: latest + simple_errors: true + quiet: false + - name: Apply all available patches community.general.zypper: name: '*' @@ -347,15 +368,38 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + # apply simple_errors logic to rc 0,102,103,106 + if m.params['simple_errors']: + stdout = get_simple_errors(dom) or stdout + return packages, rc, stdout, stderr + + # apply simple_errors logic to rc other than 0,102,103,106 + if m.params['simple_errors']: + stdout = get_simple_errors(dom) or stdout + m.fail_json(msg='Zypper run command failed with return code %s.' 
% rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) +def get_simple_errors(dom): + simple_errors = [] + message_xml_tags = dom.getElementsByTagName('message') + + if message_xml_tags is None: + return None + + for x in message_xml_tags: + simple_errors.append(x.firstChild.data) + return " \n".join(simple_errors) + + def get_cmd(m, subcommand): "puts together the basic zypper command arguments with those passed to the module" is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] is_refresh = subcommand == 'refresh' - cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] + cmd = [m.get_bin_path('zypper', required=True), '--non-interactive', '--xmlout'] + if m.params['quiet']: + cmd.append('--quiet') if transactional_updates(): cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd if m.params['extra_args_precommand']: @@ -555,6 +599,8 @@ def main(): allow_vendor_change=dict(required=False, default=False, type='bool'), replacefiles=dict(required=False, default=False, type='bool'), clean_deps=dict(required=False, default=False, type='bool'), + simple_errors=dict(required=False, default=False, type='bool'), + quiet=dict(required=False, default=True, type='bool'), ), supports_check_mode=True ) From 70b62ed7453efc286bb912e888dd59643d39e40e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 09:16:10 +1300 Subject: [PATCH 405/482] s[a-c]*: normalize docs (#9353) * s[a-c]*: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/say.py | 17 ++- plugins/modules/scaleway_compute.py | 53 ++++---- .../scaleway_compute_private_network.py | 31 ++--- plugins/modules/scaleway_container.py | 18 +-- plugins/modules/scaleway_container_info.py | 13 +- .../modules/scaleway_container_namespace.py | 15 +-- 
.../scaleway_container_namespace_info.py | 15 +-- .../modules/scaleway_container_registry.py | 13 +- .../scaleway_container_registry_info.py | 13 +- plugins/modules/scaleway_database_backup.py | 125 +++++++++--------- plugins/modules/scaleway_function.py | 20 +-- plugins/modules/scaleway_function_info.py | 15 +-- .../modules/scaleway_function_namespace.py | 15 +-- .../scaleway_function_namespace_info.py | 15 +-- plugins/modules/scaleway_image_info.py | 16 +-- plugins/modules/scaleway_ip.py | 38 +++--- plugins/modules/scaleway_ip_info.py | 16 +-- plugins/modules/scaleway_lb.py | 28 ++-- plugins/modules/scaleway_organization_info.py | 15 +-- plugins/modules/scaleway_private_network.py | 29 ++-- plugins/modules/scaleway_security_group.py | 25 ++-- .../modules/scaleway_security_group_info.py | 17 +-- .../modules/scaleway_security_group_rule.py | 37 +++--- plugins/modules/scaleway_server_info.py | 16 +-- plugins/modules/scaleway_snapshot_info.py | 16 +-- plugins/modules/scaleway_sshkey.py | 23 ++-- plugins/modules/scaleway_user_data.py | 29 ++-- plugins/modules/scaleway_volume.py | 19 ++- plugins/modules/scaleway_volume_info.py | 16 +-- 29 files changed, 336 insertions(+), 382 deletions(-) diff --git a/plugins/modules/say.py b/plugins/modules/say.py index 175e5feb0b..2dc359083d 100644 --- a/plugins/modules/say.py +++ b/plugins/modules/say.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: say short_description: Makes a computer to speak description: - - makes a computer speak! Amuse your friends, annoy your coworkers! + - Makes a computer speak! Amuse your friends, annoy your coworkers! notes: - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say). - If you like this module, you may also be interested in the osx_say callback plugin. @@ -37,19 +36,19 @@ options: description: - What voice to use. 
required: false -requirements: [ say or espeak or espeak-ng ] +requirements: [say or espeak or espeak-ng] author: - - "Ansible Core Team" - - "Michael DeHaan (@mpdehaan)" -''' + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Makes a computer to speak community.general.say: msg: '{{ inventory_hostname }} is all done' voice: Zarvox delegate_to: localhost -''' +""" import platform from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index d8480c199d..c61030bede 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_compute short_description: Scaleway compute management module author: Remy Leone (@remyleone) description: - - "This module manages compute instances on Scaleway." + - This module manages compute instances on Scaleway. extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -35,35 +34,33 @@ options: public_ip: type: str description: - - Manage public IP on a Scaleway server - - Could be Scaleway IP address UUID - - V(dynamic) Means that IP is destroyed at the same time the host is destroyed - - V(absent) Means no public IP at all + - Manage public IP on a Scaleway server. + - Could be Scaleway IP address UUID. + - V(dynamic) Means that IP is destroyed at the same time the host is destroyed. + - V(absent) Means no public IP at all. default: absent enable_ipv6: description: - - Enable public IPv6 connectivity on the instance + - Enable public IPv6 connectivity on the instance. 
default: false type: bool image: type: str description: - - Image identifier used to start the instance with + - Image identifier used to start the instance with. required: true name: type: str description: - - Name of the instance - + - Name of the instance. organization: type: str description: - Organization identifier. - Exactly one of O(project) and O(organization) must be specified. - project: type: str description: @@ -74,7 +71,7 @@ options: state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -87,14 +84,14 @@ options: type: list elements: str description: - - List of tags to apply to the instance (5 max) + - List of tags to apply to the instance (5 max). required: false default: [] region: type: str description: - - Scaleway compute zone + - Scaleway compute zone. required: true choices: - ams1 @@ -109,38 +106,38 @@ options: commercial_type: type: str description: - - Commercial name of the compute node + - Commercial name of the compute node. required: true wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. type: bool default: false wait_timeout: type: int description: - - Time to wait for the server to reach the expected state + - Time to wait for the server to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the server + - Time to wait before every attempt to check the state of the server. required: false default: 3 security_group: type: str description: - - Security group unique identifier - - If no value provided, the default security group or current security group will be used + - Security group unique identifier. + - If no value provided, the default security group or current security group will be used. 
required: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a server community.general.scaleway_compute: name: foobar @@ -174,10 +171,10 @@ EXAMPLES = ''' project: 951df375-e094-4d26-97c1-ba548eeb9c42 region: ams1 commercial_type: VC1S -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime import time diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py index b41720be58..5339dfef15 100644 --- a/plugins/modules/scaleway_compute_private_network.py +++ b/plugins/modules/scaleway_compute_private_network.py @@ -11,18 +11,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_compute_private_network short_description: Scaleway compute - private network management version_added: 5.2.0 author: Pascal MANGIN (@pastral) description: - - This module add or remove a private network to a compute instance - (U(https://developer.scaleway.com)). + - This module add or remove a private network to a compute instance (U(https://developer.scaleway.com)). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +32,7 @@ options: state: type: str description: - - Indicate desired state of the VPC. + - Indicate desired state of the VPC. default: present choices: - present @@ -49,7 +47,7 @@ options: region: type: str description: - - Scaleway region to use (for example V(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 @@ -64,18 +62,17 @@ options: compute_id: type: str description: - - ID of the compute instance (see M(community.general.scaleway_compute)). + - ID of the compute instance (see M(community.general.scaleway_compute)). 
required: true private_network_id: type: str description: - - ID of the private network (see M(community.general.scaleway_private_network)). + - ID of the private network (see M(community.general.scaleway_private_network)). required: true +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Plug a VM to a private network community.general.scaleway_compute_private_network: project: '{{ scw_project }}' @@ -92,10 +89,9 @@ EXAMPLES = ''' region: par1 compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89" private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89" +""" -''' - -RETURN = ''' +RETURN = r""" scaleway_compute_private_network: description: Information on the VPC. returned: success when O(state=present) @@ -117,7 +113,8 @@ scaleway_compute_private_network: "updated_at": "2022-01-15T11:12:04.624837Z", "zone": "fr-par-2" } -''' +""" + from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py index a18cb1d75f..5cdd12da5d 100644 --- a/plugins/modules/scaleway_container.py +++ b/plugins/modules/scaleway_container.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container short_description: Scaleway Container management version_added: 6.0.0 @@ -109,7 +108,8 @@ options: privacy: description: - Privacy policies define whether a container can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the + Scaleway API. 
type: str default: public choices: @@ -147,9 +147,9 @@ options: - Redeploy the container if update is required. type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container community.general.scaleway_container: namespace_id: '{{ scw_container_namespace }}' @@ -169,9 +169,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container -''' +""" -RETURN = ''' +RETURN = r""" container: description: The container information. returned: when O(state=present) @@ -181,7 +181,7 @@ container: description: Container used for testing scaleway_container ansible module domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud environment_variables: - MY_VAR: my_value + MY_VAR: my_value error_message: null http_option: "" id: c9070eb0-d7a4-48dd-9af3-4fb139890721 @@ -201,7 +201,7 @@ container: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from copy import deepcopy diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py index 350c96e545..28cf40ac50 100644 --- a/plugins/modules/scaleway_container_info.py +++ b/plugins/modules/scaleway_container_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_info short_description: Retrieve information on Scaleway Container version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the container. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container info community.general.scaleway_container_info: namespace_id: '{{ scw_container_namespace }}' region: fr-par name: my-awesome-container register: container_info_task -''' +""" -RETURN = ''' +RETURN = r""" container: description: The container information. 
returned: always @@ -87,7 +86,7 @@ container: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py index 0f5de6c31d..802a491321 100644 --- a/plugins/modules/scaleway_container_namespace.py +++ b/plugins/modules/scaleway_container_namespace.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_namespace short_description: Scaleway Container namespace management version_added: 6.0.0 @@ -24,7 +23,7 @@ extends_documentation_fragment: - community.general.scaleway_waitable_resource - community.general.attributes requirements: - - passlib[argon2] >= 1.7.4 + - passlib[argon2] >= 1.7.4 attributes: check_mode: @@ -84,9 +83,9 @@ options: - Injected in containers at runtime. type: dict default: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container namespace community.general.scaleway_container_namespace: project_id: '{{ scw_project }}' @@ -105,9 +104,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container-namespace -''' +""" -RETURN = ''' +RETURN = r""" container_namespace: description: The container namespace information. 
returned: when O(state=present) @@ -128,7 +127,7 @@ container_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from copy import deepcopy diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py index d783747203..d1e7196871 100644 --- a/plugins/modules/scaleway_container_namespace_info.py +++ b/plugins/modules/scaleway_container_namespace_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_namespace_info short_description: Retrieve information on Scaleway Container namespace version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the container namespace. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container namespace info community.general.scaleway_container_namespace_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-container-namespace register: container_namespace_info_task -''' +""" -RETURN = ''' +RETURN = r""" container_namespace: description: The container namespace information. 
returned: always @@ -66,7 +65,7 @@ container_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-container-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -78,7 +77,7 @@ container_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py index 4f17fecad7..132dfe8bb6 100644 --- a/plugins/modules/scaleway_container_registry.py +++ b/plugins/modules/scaleway_container_registry.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_registry short_description: Scaleway Container registry management module version_added: 5.8.0 @@ -77,9 +76,9 @@ options: - public - private default: private -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container registry community.general.scaleway_container_registry: project_id: '{{ scw_project }}' @@ -94,9 +93,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container-registry -''' +""" -RETURN = ''' +RETURN = r""" container_registry: description: The container registry information. 
returned: when O(state=present) @@ -116,7 +115,7 @@ container_registry: status: ready status_message: "" updated_at: "2022-10-14T09:51:07.949716Z" -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, diff --git a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py index 7645789cff..e0fc1db5f3 100644 --- a/plugins/modules/scaleway_container_registry_info.py +++ b/plugins/modules/scaleway_container_registry_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_registry_info short_description: Scaleway Container registry info module version_added: 5.8.0 @@ -46,18 +45,18 @@ options: description: - Name of the container registry. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container registry info community.general.scaleway_container_registry_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-container-registry register: container_registry_info_task -''' +""" -RETURN = ''' +RETURN = r""" container_registry: description: The container registry information. 
returned: always @@ -77,7 +76,7 @@ container_registry: status: ready status_message: "" updated_at: "2022-10-14T09:51:07.949716Z" -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py index 1d0c17fb6d..b19a6b49bd 100644 --- a/plugins/modules/scaleway_database_backup.py +++ b/plugins/modules/scaleway_database_backup.py @@ -12,17 +12,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_database_backup short_description: Scaleway database backups management module version_added: 1.2.0 author: Guillaume Rodriguez (@guillaume_ro_fr) description: - - "This module manages database backups on Scaleway account U(https://developer.scaleway.com)." + - This module manages database backups on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: support: full @@ -31,118 +30,118 @@ attributes: options: state: description: - - Indicate desired state of the database backup. - - V(present) creates a backup. - - V(absent) deletes the backup. - - V(exported) creates a download link for the backup. - - V(restored) restores the backup to a new database. + - Indicate desired state of the database backup. + - V(present) creates a backup. + - V(absent) deletes the backup. + - V(exported) creates a download link for the backup. + - V(restored) restores the backup to a new database. type: str default: present choices: - - present - - absent - - exported - - restored + - present + - absent + - exported + - restored region: description: - - Scaleway region to use (for example V(fr-par)). 
+ - Scaleway region to use (for example V(fr-par)). type: str required: true choices: - - fr-par - - nl-ams - - pl-waw + - fr-par + - nl-ams + - pl-waw id: description: - - UUID used to identify the database backup. - - Required for V(absent), V(exported) and V(restored) states. + - UUID used to identify the database backup. + - Required for V(absent), V(exported) and V(restored) states. type: str name: description: - - Name used to identify the database backup. - - Required for V(present) state. - - Ignored when O(state=absent), O(state=exported) or O(state=restored). + - Name used to identify the database backup. + - Required for V(present) state. + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false database_name: description: - - Name used to identify the database. - - Required for V(present) and V(restored) states. - - Ignored when O(state=absent) or O(state=exported). + - Name used to identify the database. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false instance_id: description: - - UUID of the instance associated to the database backup. - - Required for V(present) and V(restored) states. - - Ignored when O(state=absent) or O(state=exported). + - UUID of the instance associated to the database backup. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false expires_at: description: - - Expiration datetime of the database backup (ISO 8601 format). - - Ignored when O(state=absent), O(state=exported) or O(state=restored). + - Expiration datetime of the database backup (ISO 8601 format). + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. 
type: bool default: false wait_timeout: description: - - Time to wait for the backup to reach the expected state. + - Time to wait for the backup to reach the expected state. type: int required: false default: 300 wait_sleep_time: description: - - Time to wait before every attempt to check the state of the backup. + - Time to wait before every attempt to check the state of the backup. type: int required: false default: 3 -''' +""" -EXAMPLES = ''' - - name: Create a backup - community.general.scaleway_database_backup: - name: 'my_backup' - state: present - region: 'fr-par' - database_name: 'my-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +EXAMPLES = r""" +- name: Create a backup + community.general.scaleway_database_backup: + name: 'my_backup' + state: present + region: 'fr-par' + database_name: 'my-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Export a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: exported - region: 'fr-par' +- name: Export a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: exported + region: 'fr-par' - - name: Restore a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: restored - region: 'fr-par' - database_name: 'my-new-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +- name: Restore a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: restored + region: 'fr-par' + database_name: 'my-new-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Remove a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: absent - region: 'fr-par' -''' +- name: Remove a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: absent + region: 'fr-par' +""" 
-RETURN = ''' +RETURN = r""" metadata: description: Backup metadata. returned: when O(state=present), O(state=exported), or O(state=restored) @@ -164,7 +163,7 @@ metadata: "updated_at": "2020-08-06T12:42:10.581649Z" } } -''' +""" import datetime import time diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py index 2de0afd987..a5e81c37e4 100644 --- a/plugins/modules/scaleway_function.py +++ b/plugins/modules/scaleway_function.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function short_description: Scaleway Function management version_added: 6.0.0 @@ -97,8 +96,8 @@ options: runtime: description: - - Runtime of the function - - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes + - Runtime of the function. + - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes. type: str required: true @@ -121,7 +120,8 @@ options: privacy: description: - Privacy policies define whether a function can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the + Scaleway API. type: str default: public choices: @@ -133,9 +133,9 @@ options: - Redeploy the function if update is required. 
type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a function community.general.scaleway_function: namespace_id: '{{ scw_function_namespace }}' @@ -155,9 +155,9 @@ EXAMPLES = ''' region: fr-par state: absent name: my-awesome-function -''' +""" -RETURN = ''' +RETURN = r""" function: description: The function information. returned: when O(state=present) @@ -186,7 +186,7 @@ function: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from copy import deepcopy diff --git a/plugins/modules/scaleway_function_info.py b/plugins/modules/scaleway_function_info.py index d65987664c..7a3acef11e 100644 --- a/plugins/modules/scaleway_function_info.py +++ b/plugins/modules/scaleway_function_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_info short_description: Retrieve information on Scaleway Function version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the function. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a function info community.general.scaleway_function_info: namespace_id: '{{ scw_function_namespace }}' region: fr-par name: my-awesome-function register: function_info_task -''' +""" -RETURN = ''' +RETURN = r""" function: description: The function information. 
returned: always @@ -68,7 +67,7 @@ function: domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud environment_variables: MY_VAR: my_value - error_message: null + error_message: handler: handler.handle http_option: "" id: ceb64dc4-4464-4196-8e20-ecef705475d3 @@ -86,7 +85,7 @@ function: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py index 7779761e38..d43b42bc7f 100644 --- a/plugins/modules/scaleway_function_namespace.py +++ b/plugins/modules/scaleway_function_namespace.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_namespace short_description: Scaleway Function namespace management version_added: 6.0.0 @@ -84,9 +83,9 @@ options: - Injected in functions at runtime. type: dict default: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a function namespace community.general.scaleway_function_namespace: project_id: '{{ scw_project }}' @@ -105,9 +104,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-function-namespace -''' +""" -RETURN = ''' +RETURN = r""" function_namespace: description: The function namespace information. 
returned: when O(state=present) @@ -116,7 +115,7 @@ function_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-function-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -128,7 +127,7 @@ function_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from copy import deepcopy diff --git a/plugins/modules/scaleway_function_namespace_info.py b/plugins/modules/scaleway_function_namespace_info.py index d5d48ee4dd..f2bed200dc 100644 --- a/plugins/modules/scaleway_function_namespace_info.py +++ b/plugins/modules/scaleway_function_namespace_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_namespace_info short_description: Retrieve information on Scaleway Function namespace version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the function namespace. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a function namespace info community.general.scaleway_function_namespace_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-function-namespace register: function_namespace_info_task -''' +""" -RETURN = ''' +RETURN = r""" function_namespace: description: The function namespace information. 
returned: always @@ -66,7 +65,7 @@ function_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-function-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -78,7 +77,7 @@ function_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py index bdae185148..0f6d1539c8 100644 --- a/plugins/modules/scaleway_image_info.py +++ b/plugins/modules/scaleway_image_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_image_info short_description: Gather information about the Scaleway images available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway images information community.general.scaleway_image_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_image_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_image_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict @@ -91,7 +89,7 @@ scaleway_image_info: "state": "available" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py index 79f0c7e3fb..4fad2faf61 100644 --- a/plugins/modules/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -11,17 +11,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_ip short_description: Scaleway IP management module author: Remy Leone (@remyleone) description: - - This module manages IP on Scaleway account - U(https://developer.scaleway.com) + - This module manages IP on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -33,7 +31,7 @@ options: state: type: str description: - - Indicate desired state of the IP. + - Indicate desired state of the IP. default: present choices: - present @@ -42,13 +40,13 @@ options: organization: type: str description: - - Scaleway organization identifier + - Scaleway organization identifier. required: true region: type: str description: - - Scaleway region to use (for example par1). + - Scaleway region to use (for example par1). required: true choices: - ams1 @@ -63,21 +61,19 @@ options: id: type: str description: - - id of the Scaleway IP (UUID) - + - Id of the Scaleway IP (UUID). server: type: str description: - - id of the server you want to attach an IP to. - - To unattach an IP don't specify this option - + - Id of the server you want to attach an IP to. + - To unattach an IP don't specify this option. reverse: type: str description: - - Reverse to assign to the IP -''' + - Reverse to assign to the IP. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an IP community.general.scaleway_ip: organization: '{{ scw_org }}' @@ -90,9 +86,9 @@ EXAMPLES = ''' id: '{{ ip_creation_task.scaleway_ip.id }}' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -110,8 +106,8 @@ data: "address": "212.47.232.136" } ] - } -''' + } +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py index 1fd4be5898..b597c7c42b 100644 --- a/plugins/modules/scaleway_ip_info.py +++ b/plugins/modules/scaleway_ip_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_ip_info short_description: Gather information about the Scaleway ips available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway ips information community.general.scaleway_ip_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_ip_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_ip_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
+ - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)' returned: success type: list elements: dict @@ -71,7 +69,7 @@ scaleway_ip_info: } } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 6cc947755d..7e13c3843f 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_lb short_description: Scaleway load-balancer management module author: Remy Leone (@remyleone) description: - - "This module manages load-balancers on Scaleway." + - This module manages load-balancers on Scaleway. extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -53,7 +52,7 @@ options: state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -62,7 +61,7 @@ options: region: type: str description: - - Scaleway zone. + - Scaleway zone. required: true choices: - nl-ams @@ -74,30 +73,29 @@ options: elements: str default: [] description: - - List of tags to apply to the load-balancer. - + - List of tags to apply to the load-balancer. wait: description: - - Wait for the load-balancer to reach its desired state before returning. + - Wait for the load-balancer to reach its desired state before returning. type: bool default: false wait_timeout: type: int description: - - Time to wait for the load-balancer to reach the expected state. + - Time to wait for the load-balancer to reach the expected state. 
required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the load-balancer. + - Time to wait before every attempt to check the state of the load-balancer. required: false default: 3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a load-balancer community.general.scaleway_lb: name: foobar @@ -113,7 +111,7 @@ EXAMPLES = ''' state: absent organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 region: fr-par -''' +""" RETURNS = ''' { diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py index e9e272c988..603ab3cd4c 100644 --- a/plugins/modules/scaleway_organization_info.py +++ b/plugins/modules/scaleway_organization_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_organization_info short_description: Gather information about the Scaleway organizations available description: @@ -27,20 +26,18 @@ extends_documentation_fragment: - community.general.scaleway - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway organizations information community.general.scaleway_organization_info: register: result - ansible.builtin.debug: msg: "{{ result.scaleway_organization_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_organization_info: description: Response from Scaleway API. 
returned: success @@ -70,7 +67,7 @@ scaleway_organization_info: "warnings": [] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py index 0cc9b900f4..922a780098 100644 --- a/plugins/modules/scaleway_private_network.py +++ b/plugins/modules/scaleway_private_network.py @@ -11,17 +11,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_private_network short_description: Scaleway private network management version_added: 4.5.0 author: Pascal MANGIN (@pastral) description: - - "This module manages private network on Scaleway account (U(https://developer.scaleway.com))." + - This module manages private network on Scaleway account (U(https://developer.scaleway.com)). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -33,7 +32,7 @@ options: state: type: str description: - - Indicate desired state of the VPC. + - Indicate desired state of the VPC. default: present choices: - present @@ -48,7 +47,7 @@ options: region: type: str description: - - Scaleway region to use (for example V(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 @@ -63,18 +62,16 @@ options: name: type: str description: - - Name of the VPC. - + - Name of the VPC. tags: type: list elements: str description: - - List of tags to apply to the instance. + - List of tags to apply to the instance. 
default: [] +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an private network community.general.scaleway_vpc: project: '{{ scw_project }}' @@ -88,9 +85,9 @@ EXAMPLES = ''' name: 'foo' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" scaleway_private_network: description: Information on the VPC. returned: success when O(state=present) @@ -112,7 +109,7 @@ scaleway_private_network: "updated_at": "2022-01-15T11:12:04.624837Z", "zone": "fr-par-2" } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py index 3aee99e99a..3e1a28275e 100644 --- a/plugins/modules/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -12,16 +12,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group short_description: Scaleway Security Group management module author: Antoine Barbare (@abarbare) description: - - "This module manages Security Group on Scaleway account U(https://developer.scaleway.com)." + - This module manages Security Group on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +33,7 @@ options: description: - Indicate desired state of the Security Group. type: str - choices: [ absent, present ] + choices: [absent, present] default: present organization: @@ -79,21 +78,21 @@ options: description: - Default policy for incoming traffic. type: str - choices: [ accept, drop ] + choices: [accept, drop] outbound_default_policy: description: - Default policy for outcoming traffic. 
type: str - choices: [ accept, drop ] + choices: [accept, drop] organization_default: description: - Create security group to be the default one. type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Security Group community.general.scaleway_security_group: state: present @@ -106,9 +105,9 @@ EXAMPLES = ''' outbound_default_policy: accept organization_default: false register: security_group_creation_task -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -127,7 +126,7 @@ data: "stateful": false } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py index fb28e87740..6664938e09 100644 --- a/plugins/modules/scaleway_security_group_info.py +++ b/plugins/modules/scaleway_security_group_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_info short_description: Gather information about the Scaleway security groups available description: @@ -36,10 +35,9 @@ extends_documentation_fragment: - community.general.scaleway - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway security groups information community.general.scaleway_security_group_info: region: par1 @@ -47,14 +45,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_security_group_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_security_group_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
+ - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' returned: success type: list elements: dict @@ -75,7 +72,7 @@ scaleway_security_group_info: ] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py index 9cbb2eb57e..ec89d41f6c 100644 --- a/plugins/modules/scaleway_security_group_rule.py +++ b/plugins/modules/scaleway_security_group_rule.py @@ -12,13 +12,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_rule short_description: Scaleway Security Group Rule management module author: Antoine Barbare (@abarbare) description: - - "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)." + - This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - community.general.scaleway - community.general.attributes @@ -99,23 +98,23 @@ options: description: - Security Group unique identifier. 
required: true -''' +""" -EXAMPLES = ''' - - name: Create a Security Group Rule - community.general.scaleway_security_group_rule: - state: present - region: par1 - protocol: TCP - port: 80 - ip_range: 0.0.0.0/0 - direction: inbound - action: accept - security_group: b57210ee-1281-4820-a6db-329f78596ecb - register: security_group_rule_creation_task -''' +EXAMPLES = r""" +- name: Create a Security Group Rule + community.general.scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -133,7 +132,7 @@ data: "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py index 01e9410da8..39af47005e 100644 --- a/plugins/modules/scaleway_server_info.py +++ b/plugins/modules/scaleway_server_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_server_info short_description: Gather information about the Scaleway servers available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway servers information community.general.scaleway_server_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_server_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_server_info: description: - Response from 
Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' returned: success type: list elements: dict @@ -157,7 +155,7 @@ scaleway_server_info: } } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py index 687f43c85b..6b932cced2 100644 --- a/plugins/modules/scaleway_snapshot_info.py +++ b/plugins/modules/scaleway_snapshot_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_snapshot_info short_description: Gather information about the Scaleway snapshots available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway snapshots information community.general.scaleway_snapshot_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_snapshot_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_snapshot_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict @@ -75,7 +73,7 @@ scaleway_snapshot_info: "volume_type": "l_ssd" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py index 5647f9cd05..37e8ec8c3b 100644 --- a/plugins/modules/scaleway_sshkey.py +++ b/plugins/modules/scaleway_sshkey.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_sshkey short_description: Scaleway SSH keys management module author: Remy Leone (@remyleone) description: - - "This module manages SSH keys on Scaleway account U(https://developer.scaleway.com)." + - This module manages SSH keys on Scaleway account (U(https://developer.scaleway.com)). extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +33,7 @@ options: state: type: str description: - - Indicate desired state of the SSH key. + - Indicate desired state of the SSH key. default: present choices: - present @@ -42,7 +41,7 @@ options: ssh_pub_key: type: str description: - - The public SSH key as a string to add. + - The public SSH key as a string to add. required: true api_url: type: str @@ -50,9 +49,9 @@ options: - Scaleway API URL. default: 'https://account.scaleway.com' aliases: ['base_url'] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Add SSH key" community.general.scaleway_sshkey: ssh_pub_key: "ssh-rsa AAAA..." @@ -68,9 +67,9 @@ EXAMPLES = ''' ssh_pub_key: "ssh-rsa AAAA..." state: "present" oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). 
returned: when O(state=present) @@ -80,7 +79,7 @@ data: {"key": "ssh-rsa AAAA...."} ] } -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py index 72046ff532..f4f2c18624 100644 --- a/plugins/modules/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -13,17 +13,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_user_data short_description: Scaleway user_data management module author: Remy Leone (@remyleone) description: - - This module manages user_data on compute instances on Scaleway. - - It can be used to configure cloud-init for instance. + - This module manages user_data on compute instances on Scaleway. + - It can be used to configure cloud-init for instance. extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -36,20 +35,20 @@ options: server_id: type: str description: - - Scaleway Compute instance ID of the server. + - Scaleway Compute instance ID of the server. required: true user_data: type: dict description: - - User defined data. Typically used with C(cloud-init). - - Pass your C(cloud-init) script here as a string. + - User defined data. Typically used with C(cloud-init). + - Pass your C(cloud-init) script here as a string. required: false region: type: str description: - - Scaleway compute zone. + - Scaleway compute zone. 
required: true choices: - ams1 @@ -60,19 +59,19 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the cloud-init community.general.scaleway_user_data: server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce' region: ams1 user_data: cloud-init: 'final_message: "Hello World!"' -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py index 46d72288e7..ed6a506742 100644 --- a/plugins/modules/scaleway_volume.py +++ b/plugins/modules/scaleway_volume.py @@ -12,16 +12,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_volume short_description: Scaleway volumes management module author: Henryk Konsek (@hekonsek) description: - - "This module manages volumes on Scaleway account U(https://developer.scaleway.com)." + - This module manages volumes on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -74,9 +73,9 @@ options: type: str description: - Type of the volume (for example 'l_ssd'). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create 10GB volume community.general.scaleway_volume: name: my-volume @@ -92,9 +91,9 @@ EXAMPLES = ''' name: my-volume state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). 
returned: when O(state=present) @@ -110,7 +109,7 @@ data: "volume_type": "l_ssd" } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py index 471845c43e..1b2e95f88c 100644 --- a/plugins/modules/scaleway_volume_info.py +++ b/plugins/modules/scaleway_volume_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_volume_info short_description: Gather information about the Scaleway volumes available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway volumes information community.general.scaleway_volume_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_volume_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_volume_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict @@ -73,7 +71,7 @@ scaleway_volume_info: "volume_type": "l_ssd" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( From 97514612956083d585c1141ec3dbb57483519162 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 09:19:49 +1300 Subject: [PATCH 406/482] r*: normalize docs (#9354) * r*: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein * Apply suggestions from code review --------- Co-authored-by: Felix Fontein --- plugins/modules/read_csv.py | 68 +- plugins/modules/redfish_command.py | 957 +++++++++--------- plugins/modules/redfish_config.py | 369 ++++--- plugins/modules/redfish_info.py | 542 +++++----- plugins/modules/redhat_subscription.py | 323 +++--- plugins/modules/redis.py | 144 ++- plugins/modules/redis_data.py | 106 +- plugins/modules/redis_data_incr.py | 37 +- plugins/modules/redis_data_info.py | 13 +- plugins/modules/redis_info.py | 23 +- plugins/modules/rhevm.py | 377 ++++--- plugins/modules/rhsm_release.py | 25 +- plugins/modules/rhsm_repository.py | 38 +- plugins/modules/riak.py | 27 +- plugins/modules/rocketchat.py | 41 +- plugins/modules/rollbar_deployment.py | 47 +- plugins/modules/rpm_ostree_pkg.py | 127 ++- plugins/modules/rundeck_acl_policy.py | 129 ++- .../modules/rundeck_job_executions_info.py | 102 +- plugins/modules/rundeck_job_run.py | 125 ++- plugins/modules/rundeck_project.py | 107 +- plugins/modules/runit.py | 78 +- 22 files changed, 1858 insertions(+), 1947 deletions(-) diff --git a/plugins/modules/read_csv.py b/plugins/modules/read_csv.py index 3c59013180..ce2631482b 100644 --- a/plugins/modules/read_csv.py +++ b/plugins/modules/read_csv.py @@ -8,16 +8,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: read_csv 
short_description: Read a CSV file description: -- Read a CSV file and return a list or a dictionary, containing one dictionary per row. + - Read a CSV file and return a list or a dictionary, containing one dictionary per row. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) extends_documentation_fragment: -- community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -26,58 +25,57 @@ attributes: options: path: description: - - The CSV filename to read data from. + - The CSV filename to read data from. type: path required: true - aliases: [ filename ] + aliases: [filename] key: description: - - The column name used as a key for the resulting dictionary. - - If O(key) is unset, the module returns a list of dictionaries, - where each dictionary is a row in the CSV file. + - The column name used as a key for the resulting dictionary. + - If O(key) is unset, the module returns a list of dictionaries, where each dictionary is a row in the CSV file. type: str dialect: description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include V(excel), V(excel-tab) or V(unix). + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). type: str default: excel fieldnames: description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. + - A list of field names for every column. + - This is needed if the CSV does not have a header. type: list elements: str unique: description: - - Whether the O(key) used is expected to be unique. + - Whether the O(key) used is expected to be unique. type: bool default: true delimiter: description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. + - A one-character string used to separate fields. 
+ - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. type: str skipinitialspace: description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. type: bool strict: description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. type: bool seealso: - plugin: ansible.builtin.csvfile plugin_type: lookup description: Can be used to do selective lookups in CSV files from Jinja. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Example CSV file with header # # name,uid,gid @@ -118,9 +116,9 @@ EXAMPLES = r''' delimiter: ';' register: users delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" dict: description: The CSV content as a dictionary. 
returned: success @@ -139,13 +137,13 @@ list: returned: success type: list sample: - - name: dag - uid: 500 - gid: 500 - - name: jeroen - uid: 501 - gid: 500 -''' + - name: dag + uid: 500 + gid: 500 + - name: jeroen + uid: 501 + gid: 500 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index 829b77897d..edbbb18e5f 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redfish_command short_description: Manages Out-Of-Band controllers using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - Manages OOB controller ex. reboot, log management. - Manages OOB controller users ex. add, remove, update. - Manages system power ex. on, off, graceful and forced reboot. @@ -62,32 +60,32 @@ options: version_added: 2.3.0 id: required: false - aliases: [ account_id ] + aliases: [account_id] description: - ID of account to delete/modify. - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. type: str new_username: required: false - aliases: [ account_username ] + aliases: [account_username] description: - Username of account to add/delete/modify. type: str new_password: required: false - aliases: [ account_password ] + aliases: [account_password] description: - New password of account to add/modify. type: str roleid: required: false - aliases: [ account_roleid ] + aliases: [account_roleid] description: - Role of account to add/modify. 
type: str account_types: required: false - aliases: [ account_accounttypes ] + aliases: [account_accounttypes] description: - Array of account types to apply to a user account. type: list @@ -95,7 +93,7 @@ options: version_added: '7.2.0' oem_account_types: required: false - aliases: [ account_oemaccounttypes ] + aliases: [account_oemaccounttypes] description: - Array of OEM account types to apply to a user account. type: list @@ -109,15 +107,14 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this parameter changed from V(10) to V(60) - in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. type: int default: 60 boot_override_mode: description: - Boot mode when using an override. type: str - choices: [ Legacy, UEFI ] + choices: [Legacy, UEFI] version_added: 3.5.0 uefi_target: required: false @@ -131,7 +128,7 @@ options: type: str update_username: required: false - aliases: [ account_updatename ] + aliases: [account_updatename] description: - New user name for updating account_username. type: str @@ -219,11 +216,9 @@ options: update_custom_oem_header: required: false description: - - Optional OEM header, sent as separate form-data for - the Multipart HTTP push update. - - The header shall start with "Oem" according to DMTF - Redfish spec 12.6.2.2. - - For more details, see U(https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html) + - Optional OEM header, sent as separate form-data for the Multipart HTTP push update. + - The header shall start with "Oem" according to DMTF Redfish spec 12.6.2.2. + - For more details, see U(https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html). - If set, then O(update_custom_oem_params) is required too. type: str version_added: '10.1.0' @@ -232,18 +227,15 @@ options: description: - Custom OEM properties for HTTP Multipart Push updates. 
- If set, then O(update_custom_oem_header) is required too. - - The properties will be passed raw without any validation or conversion by Ansible. - This means the content can be a file, a string, or any other data. - If the content is a dict that should be converted to JSON, then the - content must be converted to JSON before passing it to this module using the - P(ansible.builtin.to_json#filter) filter. + - The properties will be passed raw without any validation or conversion by Ansible. This means the content can be a file, a string, or + any other data. If the content is a dict that should be converted to JSON, then the content must be converted to JSON before passing it + to this module using the P(ansible.builtin.to_json#filter) filter. type: raw version_added: '10.1.0' update_custom_oem_mime_type: required: false description: - - MIME Type for custom OEM properties for HTTP Multipart - Push updates. + - MIME Type for custom OEM properties for HTTP Multipart Push updates. type: str version_added: '10.1.0' virtual_media: @@ -299,10 +291,8 @@ options: type: str strip_etag_quotes: description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where C(If-Match) only matches the unquoted etag string. type: bool default: false version_added: 3.7.0 @@ -316,7 +306,7 @@ options: description: - Mode to apply when reseting to default. type: str - choices: [ ResetAll, PreserveNetworkAndUsers, PreserveNetwork ] + choices: [ResetAll, PreserveNetworkAndUsers, PreserveNetwork] version_added: 8.6.0 wait: required: false @@ -336,9 +326,8 @@ options: required: false description: - SSL/TLS Ciphers to use for the request. 
- - 'When a list is provided, all ciphers are joined in order with V(:).' - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) - for more details. + - When a list is provided, all ciphers are joined in order with V(:). + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. type: list elements: str @@ -347,514 +336,512 @@ options: author: - "Jose Delarosa (@jose-delarosa)" - "T S Kushal (@TSKushal)" -''' +""" -EXAMPLES = ''' - - name: Restart system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulRestart - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +EXAMPLES = r""" +- name: Restart system power gracefully + community.general.redfish_command: + category: Systems + command: PowerGracefulRestart + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Turn system power off - community.general.redfish_command: - category: Systems - command: PowerForceOff - resource_id: 437XR1138R2 +- name: Turn system power off + community.general.redfish_command: + category: Systems + command: PowerForceOff + resource_id: 437XR1138R2 - - name: Restart system power forcefully - community.general.redfish_command: - category: Systems - command: PowerForceRestart - resource_id: 437XR1138R2 +- name: Restart system power forcefully + community.general.redfish_command: + category: Systems + command: PowerForceRestart + resource_id: 437XR1138R2 - - name: Shutdown system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulShutdown - resource_id: 437XR1138R2 +- name: Shutdown system power gracefully + 
community.general.redfish_command: + category: Systems + command: PowerGracefulShutdown + resource_id: 437XR1138R2 - - name: Turn system power on - community.general.redfish_command: - category: Systems - command: PowerOn - resource_id: 437XR1138R2 +- name: Turn system power on + community.general.redfish_command: + category: Systems + command: PowerOn + resource_id: 437XR1138R2 - - name: Reboot system power - community.general.redfish_command: - category: Systems - command: PowerReboot - resource_id: 437XR1138R2 +- name: Reboot system power + community.general.redfish_command: + category: Systems + command: PowerReboot + resource_id: 437XR1138R2 - - name: Set one-time boot device to {{ bootdevice }} - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set one-time boot device to {{ bootdevice }} + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiTarget" - uefi_target: "/0x31/0x33/0x01/0x01" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiTarget" + uefi_target: "/0x31/0x33/0x01/0x01" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set one-time boot device to BootNext target of "Boot0001" - community.general.redfish_command: - 
category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiBootNext" - boot_next: "Boot0001" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set one-time boot device to BootNext target of "Boot0001" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiBootNext" + boot_next: "Boot0001" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set persistent boot device override - community.general.redfish_command: - category: Systems - command: EnableContinuousBootOverride - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set persistent boot device override + community.general.redfish_command: + category: Systems + command: EnableContinuousBootOverride + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set one-time boot to BiosSetup - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - boot_next: BiosSetup - boot_override_mode: Legacy - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set one-time boot to BiosSetup + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + boot_next: BiosSetup + boot_override_mode: Legacy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Disable persistent boot device override - community.general.redfish_command: - category: Systems - command: DisableBootOverride +- name: Disable persistent boot device override + community.general.redfish_command: + category: Systems + command: DisableBootOverride - - name: Set system indicator LED to blink using security token for auth - 
community.general.redfish_command: - category: Systems - command: IndicatorLedBlink - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" +- name: Set system indicator LED to blink using security token for auth + community.general.redfish_command: + category: Systems + command: IndicatorLedBlink + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" - - name: Add user - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" +- name: Add user + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" - - name: Add user with specified account types - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - account_types: +- name: Add user with specified account types + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + account_types: - Redfish - WebUI - - name: Add user using new option aliases - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - account_roleid: "{{ account_roleid 
}}" +- name: Add user using new option aliases + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + account_roleid: "{{ account_roleid }}" - - name: Delete user - community.general.redfish_command: - category: Accounts - command: DeleteUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" +- name: Delete user + community.general.redfish_command: + category: Accounts + command: DeleteUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" - - name: Disable user - community.general.redfish_command: - category: Accounts - command: DisableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" +- name: Disable user + community.general.redfish_command: + category: Accounts + command: DisableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" - - name: Enable user - community.general.redfish_command: - category: Accounts - command: EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" +- name: Enable user + community.general.redfish_command: + category: Accounts + command: EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" - - name: Add and enable user - community.general.redfish_command: - category: Accounts - command: AddUser,EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ 
new_password }}" - roleid: "{{ roleid }}" +- name: Add and enable user + community.general.redfish_command: + category: Accounts + command: AddUser,EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" - - name: Update user password - community.general.redfish_command: - category: Accounts - command: UpdateUserPassword - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" +- name: Update user password + community.general.redfish_command: + category: Accounts + command: UpdateUserPassword + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" - - name: Update user role - community.general.redfish_command: - category: Accounts - command: UpdateUserRole - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - roleid: "{{ roleid }}" +- name: Update user role + community.general.redfish_command: + category: Accounts + command: UpdateUserRole + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + roleid: "{{ roleid }}" - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_updatename: "{{ account_updatename }}" +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + 
account_updatename: "{{ account_updatename }}" - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - update_username: "{{ update_username }}" +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + update_username: "{{ update_username }}" - - name: Update AccountService properties - community.general.redfish_command: - category: Accounts - command: UpdateAccountServiceProperties - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_properties: - AccountLockoutThreshold: 5 - AccountLockoutDuration: 600 +- name: Update AccountService properties + community.general.redfish_command: + category: Accounts + command: UpdateAccountServiceProperties + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_properties: + AccountLockoutThreshold: 5 + AccountLockoutDuration: 600 - - name: Update user AccountTypes - community.general.redfish_command: - category: Accounts - command: UpdateUserAccountTypes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_types: - - Redfish - - WebUI +- name: Update user AccountTypes + community.general.redfish_command: + category: Accounts + command: UpdateUserAccountTypes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_types: + - Redfish + - WebUI - - name: Clear Manager Logs with a timeout of 20 seconds - community.general.redfish_command: - category: Manager - command: ClearLogs - resource_id: BMC - baseuri: "{{ baseuri 
}}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 +- name: Clear Manager Logs with a timeout of 20 seconds + community.general.redfish_command: + category: Manager + command: ClearLogs + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Create session + community.general.redfish_command: + category: Sessions + command: CreateSession + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Set chassis indicator LED to blink using security token for auth - community.general.redfish_command: - category: Chassis - command: IndicatorLedBlink - resource_id: 1U - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" +- name: Set chassis indicator LED to blink using security token for auth + community.general.redfish_command: + category: Chassis + command: IndicatorLedBlink + resource_id: 1U + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" +- name: Delete session using security token created by CreateSesssion above + community.general.redfish_command: + category: Sessions + command: DeleteSession + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + session_uri: "{{ result.session.uri }}" - - name: Clear Sessions - community.general.redfish_command: - category: Sessions - command: ClearSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: 
Clear Sessions + community.general.redfish_command: + category: Sessions + command: ClearSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Simple update - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: https://example.com/myupdate.img +- name: Simple update + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: https://example.com/myupdate.img - - name: Simple update with additional options - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: //example.com/myupdate.img - update_protocol: FTP - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_creds: - username: operator - password: supersecretpwd +- name: Simple update with additional options + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: //example.com/myupdate.img + update_protocol: FTP + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_creds: + username: operator + password: supersecretpwd - - name: Multipart HTTP push update; timeout is 600 seconds to allow for a - large image transfer - community.general.redfish_command: - category: Update - command: MultipartHTTPPushUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 600 - update_image_file: ~/images/myupdate.img +- name: Multipart HTTP push update; timeout is 600 seconds to allow for a large image transfer + community.general.redfish_command: + category: Update + command: 
MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 600 + update_image_file: ~/images/myupdate.img - - name: Multipart HTTP push with additional options; timeout is 600 seconds - to allow for a large image transfer - community.general.redfish_command: - category: Update - command: MultipartHTTPPushUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 600 - update_image_file: ~/images/myupdate.img - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_oem_params: - PreserveConfiguration: false +- name: Multipart HTTP push with additional options; timeout is 600 seconds to allow for a large image transfer + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 600 + update_image_file: ~/images/myupdate.img + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_oem_params: + PreserveConfiguration: false - - name: Multipart HTTP push with custom OEM options - vars: - oem_payload: - ImageType: BMC - community.general.redfish_command: - category: Update - command: MultipartHTTPPushUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_file: ~/images/myupdate.img - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_custom_oem_header: OemParameters - update_custom_oem_mime_type: "application/json" - update_custom_oem_params: "{{ oem_payload | to_json }}" +- name: Multipart HTTP push with custom OEM options + vars: + oem_payload: + ImageType: BMC + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_file: ~/images/myupdate.img + update_targets: + - 
/redfish/v1/UpdateService/FirmwareInventory/BMC + update_custom_oem_header: OemParameters + update_custom_oem_mime_type: "application/json" + update_custom_oem_params: "{{ oem_payload | to_json }}" - - name: Perform requested operations to continue the update - community.general.redfish_command: - category: Update - command: PerformRequestedOperations - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_handle: /redfish/v1/TaskService/TaskMonitors/735 +- name: Perform requested operations to continue the update + community.general.redfish_command: + category: Update + command: PerformRequestedOperations + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 - - name: Insert Virtual Media - community.general.redfish_command: - category: Systems - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - media_types: - - CD - - DVD - resource_id: 1 +- name: Insert Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: 1 - - name: Insert Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - media_types: - - CD - - DVD - resource_id: BMC +- name: Insert Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + 
virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: BMC - - name: Eject Virtual Media - community.general.redfish_command: - category: Systems - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - resource_id: 1 +- name: Eject Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: 1 - - name: Eject Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - resource_id: BMC +- name: Eject Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: BMC - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: GracefulRestart - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Restart manager power gracefully and wait for it to be available - community.general.redfish_command: - category: Manager - command: GracefulRestart - resource_id: BMC - baseuri: "{{ baseuri }}" - username: 
"{{ username }}" - password: "{{ password }}" - wait: True +- name: Restart manager power gracefully and wait for it to be available + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + wait: true - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulRestart - resource_id: BMC +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulRestart + resource_id: BMC - - name: Turn manager power off - community.general.redfish_command: - category: Manager - command: PowerForceOff - resource_id: BMC +- name: Turn manager power off + community.general.redfish_command: + category: Manager + command: PowerForceOff + resource_id: BMC - - name: Restart manager power forcefully - community.general.redfish_command: - category: Manager - command: PowerForceRestart - resource_id: BMC +- name: Restart manager power forcefully + community.general.redfish_command: + category: Manager + command: PowerForceRestart + resource_id: BMC - - name: Shutdown manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulShutdown - resource_id: BMC +- name: Shutdown manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulShutdown + resource_id: BMC - - name: Turn manager power on - community.general.redfish_command: - category: Manager - command: PowerOn - resource_id: BMC +- name: Turn manager power on + community.general.redfish_command: + category: Manager + command: PowerOn + resource_id: BMC - - name: Reboot manager power - community.general.redfish_command: - category: Manager - command: PowerReboot - resource_id: BMC +- name: Reboot manager power + community.general.redfish_command: + category: Manager + command: PowerReboot + 
resource_id: BMC - - name: Factory reset manager to defaults - community.general.redfish_command: - category: Manager - command: ResetToDefaults - resource_id: BMC - reset_to_defaults_mode: ResetAll +- name: Factory reset manager to defaults + community.general.redfish_command: + category: Manager + command: ResetToDefaults + resource_id: BMC + reset_to_defaults_mode: ResetAll - - name: Verify BIOS attributes - community.general.redfish_command: - category: Systems - command: VerifyBiosAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - bios_attributes: - SubNumaClustering: "Disabled" - WorkloadProfile: "Virtualization-MaxPerformance" -''' +- name: Verify BIOS attributes + community.general.redfish_command: + category: Systems + command: VerifyBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + bios_attributes: + SubNumaClustering: "Disabled" + WorkloadProfile: "Virtualization-MaxPerformance" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" return_values: - description: Dictionary containing command-specific response data from the action. - returned: on success - type: dict - version_added: 6.1.0 - sample: { - "update_status": { - "handle": "/redfish/v1/TaskService/TaskMonitors/735", - "messages": [], - "resets_requested": [], - "ret": true, - "status": "New" - } + description: Dictionary containing command-specific response data from the action. 
+ returned: on success + type: dict + version_added: 6.1.0 + sample: { + "update_status": { + "handle": "/redfish/v1/TaskService/TaskMonitors/735", + "messages": [], + "resets_requested": [], + "ret": true, + "status": "New" } -''' + } +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 5b9caecc64..e47597f73f 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redfish_config short_description: Manages Out-Of-Band controllers using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - set or update a configuration attribute. + - Builds Redfish URIs locally and sends them to remote OOB controllers to set or update a configuration attribute. - Manages BIOS configuration settings. - Manages OOB controller configuration settings. extends_documentation_fragment: @@ -64,8 +62,7 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this parameter changed from V(10) to V(60) - in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. type: int default: 60 boot_order: @@ -111,10 +108,8 @@ options: version_added: '0.2.0' strip_etag_quotes: description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. 
+ - Only use this option to resolve bad vendor implementation where C(If-Match) only matches the unquoted etag string. type: bool default: false version_added: 3.7.0 @@ -165,15 +160,14 @@ options: description: - Setting parameter to enable or disable SecureBoot. type: bool - default: True + default: true version_added: '7.5.0' volume_details: required: false description: - Setting dict of volume to be created. - - If C(CapacityBytes) key is not specified in this dictionary, the size of - the volume will be determined by the Redfish service. It is possible the - size will not be the maximum available size. + - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume will be determined by the Redfish service. It is possible + the size will not be the maximum available size. type: dict default: {} version_added: '7.5.0' @@ -181,9 +175,8 @@ options: required: false description: - SSL/TLS Ciphers to use for the request. - - 'When a list is provided, all ciphers are joined in order with V(:).' - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) - for more details. + - When a list is provided, all ciphers are joined in order with V(:). + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
type: list elements: str @@ -192,195 +185,195 @@ options: author: - "Jose Delarosa (@jose-delarosa)" - "T S Kushal (@TSKushal)" -''' +""" -EXAMPLES = ''' - - name: Set BootMode to UEFI - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Uefi" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +EXAMPLES = r""" +- name: Set BootMode to UEFI + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Uefi" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set multiple BootMode attributes - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Bios" - OneTimeBootMode: "Enabled" - BootSeqRetry: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set multiple BootMode attributes + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Enable PXE Boot for NIC1 - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - PxeDev1EnDis: Enabled - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Enable PXE Boot for NIC1 + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + PxeDev1EnDis: Enabled + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set BIOS default settings with a timeout of 20 seconds - 
community.general.redfish_config: - category: Systems - command: SetBiosDefaultSettings - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 +- name: Set BIOS default settings with a timeout of 20 seconds + community.general.redfish_config: + category: Systems + command: SetBiosDefaultSettings + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 - - name: Set boot order - community.general.redfish_config: - category: Systems - command: SetBootOrder - boot_order: - - Boot0002 - - Boot0001 - - Boot0000 - - Boot0003 - - Boot0004 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set boot order + community.general.redfish_config: + category: Systems + command: SetBootOrder + boot_order: + - Boot0002 + - Boot0001 + - Boot0000 + - Boot0003 + - Boot0004 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set boot order to the default - community.general.redfish_config: - category: Systems - command: SetDefaultBootOrder - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set boot order to the default + community.general.redfish_config: + category: Systems + command: SetDefaultBootOrder + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set Manager Network Protocols - community.general.redfish_config: - category: Manager - command: SetNetworkProtocols - network_protocols: - SNMP: - ProtocolEnabled: true - Port: 161 - HTTP: - ProtocolEnabled: false - Port: 8080 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set Manager Network Protocols + community.general.redfish_config: + category: Manager + command: SetNetworkProtocols + network_protocols: + SNMP: + ProtocolEnabled: true + Port: 161 + HTTP: + ProtocolEnabled: 
false + Port: 8080 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set Manager NIC - community.general.redfish_config: - category: Manager - command: SetManagerNic - nic_config: - DHCPv4: - DHCPEnabled: false - IPv4StaticAddresses: - Address: 192.168.1.3 - Gateway: 192.168.1.1 - SubnetMask: 255.255.255.0 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set Manager NIC + community.general.redfish_config: + category: Manager + command: SetManagerNic + nic_config: + DHCPv4: + DHCPEnabled: false + IPv4StaticAddresses: + Address: 192.168.1.3 + Gateway: 192.168.1.1 + SubnetMask: 255.255.255.0 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Disable Host Interface - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: false - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Disable Host Interface + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: false + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Enable Host Interface for HostInterface resource ID '2' - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: true - hostinterface_id: "2" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Enable Host Interface for HostInterface resource ID '2' + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: true + hostinterface_id: "2" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set SessionService Session Timeout to 30 minutes - community.general.redfish_config: - category: 
Sessions - command: SetSessionService - sessions_config: - SessionTimeout: 1800 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Set SessionService Session Timeout to 30 minutes + community.general.redfish_config: + category: Sessions + command: SetSessionService + sessions_config: + SessionTimeout: 1800 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Enable SecureBoot - community.general.redfish_config: - category: Systems - command: EnableSecureBoot - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Enable SecureBoot + community.general.redfish_config: + category: Systems + command: EnableSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Set SecureBoot - community.general.redfish_config: - category: Systems - command: SetSecureBoot - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - secure_boot_enable: True +- name: Set SecureBoot + community.general.redfish_config: + category: Systems + command: SetSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + secure_boot_enable: true - - name: Delete All Volumes - community.general.redfish_config: - category: Systems - command: DeleteVolumes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - storage_subsystem_id: "DExxxxxx" - volume_ids: ["volume1", "volume2"] +- name: Delete All Volumes + community.general.redfish_config: + category: Systems + command: DeleteVolumes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_ids: ["volume1", "volume2"] - - name: Create Volume - community.general.redfish_config: - category: Systems - command: CreateVolume - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - 
storage_subsystem_id: "DExxxxxx" - volume_details: - Name: "MR Volume" - RAIDType: "RAID0" - Drives: - - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1" +- name: Create Volume + community.general.redfish_config: + category: Systems + command: CreateVolume + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_details: + Name: "MR Volume" + RAIDType: "RAID0" + Drives: + - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1" - - name: Set service identification to {{ service_id }} - community.general.redfish_config: - category: Manager - command: SetServiceIdentification - service_id: "{{ service_id }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' +- name: Set service identification to {{ service_id }} + community.general.redfish_config: + category: Manager + command: SetServiceIdentification + service_id: "{{ service_id }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index b1b4a45ee5..e4e909ad48 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redfish_info short_description: Manages Out-Of-Band controllers using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote OOB controllers to get information back. - Information retrieved is placed in a location specified by the user. extends_documentation_fragment: - community.general.attributes @@ -63,8 +61,7 @@ options: timeout: description: - Timeout in seconds for HTTP requests to OOB controller. - - The default value for this parameter changed from V(10) to V(60) - in community.general 9.0.0. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. type: int default: 60 update_handle: @@ -77,318 +74,317 @@ options: required: false description: - SSL/TLS Ciphers to use for the request. - - 'When a list is provided, all ciphers are joined in order with V(:).' - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) - for more details. + - When a list is provided, all ciphers are joined in order with V(:). + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
type: list elements: str version_added: 9.2.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Get CPU inventory - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +EXAMPLES = r""" +- name: Get CPU inventory + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" - - name: Get CPU model - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Get CPU model + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" - - name: Get memory inventory - community.general.redfish_info: - category: Systems - command: GetMemoryInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Get memory inventory + community.general.redfish_info: + category: Systems + command: GetMemoryInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Get fan inventory with a timeout of 20 seconds - community.general.redfish_info: - category: Chassis 
- command: GetFanInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result +- name: Get fan inventory with a timeout of 20 seconds + community.general.redfish_info: + category: Chassis + command: GetFanInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result - - name: Get Virtual Media information - community.general.redfish_info: - category: Manager - command: GetVirtualMedia - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Get Virtual Media information + community.general.redfish_info: + category: Manager + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" - - name: Get Virtual Media information from Systems - community.general.redfish_info: - category: Systems - command: GetVirtualMedia - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Get Virtual Media information from Systems + community.general.redfish_info: + category: Systems + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" - - name: Get Volume Inventory - community.general.redfish_info: - category: Systems - command: GetVolumeInventory - baseuri: "{{ baseuri }}" 
- username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" +- name: Get Volume Inventory + community.general.redfish_info: + category: Systems + command: GetVolumeInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" - - name: Get Session information - community.general.redfish_info: - category: Sessions - command: GetSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result +- name: Get Session information + community.general.redfish_info: + category: Sessions + command: GetSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" - - name: Get default inventory information - community.general.redfish_info: - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts | to_nice_json }}" +- name: Get default inventory information + community.general.redfish_info: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts | to_nice_json }}" - - name: Get several inventories - community.general.redfish_info: - category: Systems - command: GetNicInventory,GetBiosAttributes - baseuri: "{{ baseuri }}" - username: "{{ 
username }}" - password: "{{ password }}" +- name: Get several inventories + community.general.redfish_info: + category: Systems + command: GetNicInventory,GetBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get default system inventory and user information - community.general.redfish_info: - category: Systems,Accounts - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get default system inventory and user information + community.general.redfish_info: + category: Systems,Accounts + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get default system, user and firmware information - community.general.redfish_info: - category: ["Systems", "Accounts", "Update"] - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get default system, user and firmware information + community.general.redfish_info: + category: ["Systems", "Accounts", "Update"] + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get Manager NIC inventory information - community.general.redfish_info: - category: Manager - command: GetManagerNicInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get Manager NIC inventory information + community.general.redfish_info: + category: Manager + command: GetManagerNicInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get boot override information - community.general.redfish_info: - category: Systems - command: GetBootOverride - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get boot override information + community.general.redfish_info: + category: Systems + command: GetBootOverride + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get chassis 
inventory - community.general.redfish_info: - category: Chassis - command: GetChassisInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get chassis inventory + community.general.redfish_info: + category: Chassis + command: GetChassisInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get all information available in the Manager category - community.general.redfish_info: - category: Manager - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get all information available in the Manager category + community.general.redfish_info: + category: Manager + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get firmware update capability information - community.general.redfish_info: - category: Update - command: GetFirmwareUpdateCapabilities - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get firmware update capability information + community.general.redfish_info: + category: Update + command: GetFirmwareUpdateCapabilities + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get firmware inventory - community.general.redfish_info: - category: Update - command: GetFirmwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get firmware inventory + community.general.redfish_info: + category: Update + command: GetFirmwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get service identification - community.general.redfish_info: - category: Manager - command: GetServiceIdentification - manager: "{{ manager }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get service identification + community.general.redfish_info: + 
category: Manager + command: GetServiceIdentification + manager: "{{ manager }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get software inventory - community.general.redfish_info: - category: Update - command: GetSoftwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get software inventory + community.general.redfish_info: + category: Update + command: GetSoftwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get the status of an update operation - community.general.redfish_info: - category: Update - command: GetUpdateStatus - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_handle: /redfish/v1/TaskService/TaskMonitors/735 +- name: Get the status of an update operation + community.general.redfish_info: + category: Update + command: GetUpdateStatus + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 - - name: Get Manager Services - community.general.redfish_info: - category: Manager - command: GetNetworkProtocols - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get Manager Services + community.general.redfish_info: + category: Manager + command: GetNetworkProtocols + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get all information available in all categories - community.general.redfish_info: - category: all - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get all information available in all categories + community.general.redfish_info: + category: all + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get system health report - community.general.redfish_info: - 
category: Systems - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get system health report + community.general.redfish_info: + category: Systems + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get chassis health report - community.general.redfish_info: - category: Chassis - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get chassis health report + community.general.redfish_info: + category: Chassis + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get manager health report - community.general.redfish_info: - category: Manager - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get manager health report + community.general.redfish_info: + category: Manager + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get manager Redfish Host Interface inventory - community.general.redfish_info: - category: Manager - command: GetHostInterfaces - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get manager Redfish Host Interface inventory + community.general.redfish_info: + category: Manager + command: GetHostInterfaces + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get Manager Inventory - community.general.redfish_info: - category: Manager - command: GetManagerInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get Manager Inventory + community.general.redfish_info: + category: Manager + command: GetManagerInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get 
HPE Thermal Config - community.general.redfish_info: - category: Chassis - command: GetHPEThermalConfig - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get HPE Thermal Config + community.general.redfish_info: + category: Chassis + command: GetHPEThermalConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get HPE Fan Percent Minimum - community.general.redfish_info: - category: Chassis - command: GetHPEFanPercentMin - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get HPE Fan Percent Minimum + community.general.redfish_info: + category: Chassis + command: GetHPEFanPercentMin + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Get BIOS registry - community.general.redfish_info: - category: Systems - command: GetBiosRegistries - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" +- name: Get BIOS registry + community.general.redfish_info: + category: Systems + command: GetBiosRegistries + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" - - name: Check the availability of the service with a timeout of 5 seconds - community.general.redfish_info: - category: Service - command: CheckAvailability - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 5 - register: result -''' +- name: Check the availability of the service with a timeout of 5 seconds + community.general.redfish_info: + category: Service + command: CheckAvailability + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 5 + register: result +""" -RETURN = ''' +RETURN = r""" result: - description: different results depending on task - returned: always - type: dict - sample: List of CPUs on system -''' + description: Different results depending on task. 
+ returned: always + type: dict + sample: List of CPUs on system +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py index 338fb92ebd..d9f791e5cf 100644 --- a/plugins/modules/redhat_subscription.py +++ b/plugins/modules/redhat_subscription.py @@ -10,16 +10,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redhat_subscription short_description: Manage registration and subscriptions to RHSM using C(subscription-manager) description: - - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command, - registering using D-Bus if possible. + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command, + registering using D-Bus if possible. author: "Barnaby Court (@barnabycourt)" notes: - - | + - >- The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) to register, starting from community.general 6.5.0: this is done so credentials (username, password, activation keys) can be passed to C(rhsm) in a secure way. @@ -42,171 +41,161 @@ notes: already registered system, for example attaching pools to it (using O(pool_ids)), and modifying the C(syspurpose) attributes (using O(syspurpose)). requirements: - - subscription-manager - - Optionally the C(dbus) Python library; this is usually included in the OS - as it is used by C(subscription-manager). + - subscription-manager + - Optionally the C(dbus) Python library; this is usually included in the OS as it is used by C(subscription-manager). 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - whether to register and subscribe (V(present)), or unregister (V(absent)) a system - choices: [ "present", "absent" ] - default: "present" + state: + description: + - Whether to register and subscribe (V(present)), or unregister (V(absent)) a system. + choices: ["present", "absent"] + default: "present" + type: str + username: + description: + - Access.redhat.com or Red Hat Satellite or Katello username. + type: str + password: + description: + - Access.redhat.com or Red Hat Satellite or Katello password. + type: str + token: + description: + - Sso.redhat.com API access token. + type: str + version_added: 6.3.0 + server_hostname: + description: + - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + server_insecure: + description: + - Enable or disable https server certificate verification when connecting to O(server_hostname). + type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + rhsm_baseurl: + description: + - Specify CDN baseurl. + type: str + rhsm_repo_ca_cert: + description: + - Specify an alternative location for a CA certificate for CDN. + type: str + server_proxy_hostname: + description: + - Specify an HTTP proxy hostname. + type: str + server_proxy_scheme: + description: + - Specify an HTTP proxy scheme, for example V(http) or V(https). 
+ type: str + version_added: 6.2.0 + server_proxy_port: + description: + - Specify an HTTP proxy port. + type: str + server_proxy_user: + description: + - Specify a user for HTTP proxy with basic authentication. + type: str + server_proxy_password: + description: + - Specify a password for HTTP proxy with basic authentication. + type: str + auto_attach: + description: + - Upon successful registration, auto-consume available subscriptions. + - "Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0." + type: bool + activationkey: + description: + - Supply an activation key for use with registration. + type: str + org_id: + description: + - Organization ID to use in conjunction with activationkey. + type: str + environment: + description: + - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello. + type: str + pool_ids: + description: + - "Specify subscription pool IDs to consume. + - A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), + or as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef: 2). If the + quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this)." + default: [] + type: list + elements: raw + consumer_type: + description: + - The type of unit to register, defaults to system. + type: str + consumer_name: + description: + - Name of the system to register, defaults to the hostname. + type: str + consumer_id: + description: + - "References an existing consumer ID to resume using a previous registration for this system. If the system's identity certificate is + lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The default is to not specify a consumer + ID so a new ID is created." 
+ type: str + force_register: + description: + - Register the system even if it is already registered. + type: bool + default: false + release: + description: + - Set a release version. + type: str + syspurpose: + description: + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM server. Syspurpose + attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file already contains some + attributes, then new attributes overwrite existing attributes. When some attribute is not listed in the new list of attributes, the existing + attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. + type: dict + suboptions: + usage: + description: Syspurpose attribute usage. type: str - username: - description: - - access.redhat.com or Red Hat Satellite or Katello username + role: + description: Syspurpose attribute role. type: str - password: - description: - - access.redhat.com or Red Hat Satellite or Katello password + service_level_agreement: + description: Syspurpose attribute service_level_agreement. type: str - token: - description: - - sso.redhat.com API access token. - type: str - version_added: 6.3.0 - server_hostname: - description: - - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server. - type: str - server_insecure: - description: - - Enable or disable https server certificate verification when connecting to O(server_hostname). - type: str - server_prefix: - description: - - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. - type: str - version_added: 3.3.0 - server_port: - description: - - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. 
- type: str - version_added: 3.3.0 - rhsm_baseurl: - description: - - Specify CDN baseurl - type: str - rhsm_repo_ca_cert: - description: - - Specify an alternative location for a CA certificate for CDN - type: str - server_proxy_hostname: - description: - - Specify an HTTP proxy hostname. - type: str - server_proxy_scheme: - description: - - Specify an HTTP proxy scheme, for example V(http) or V(https). - type: str - version_added: 6.2.0 - server_proxy_port: - description: - - Specify an HTTP proxy port. - type: str - server_proxy_user: - description: - - Specify a user for HTTP proxy with basic authentication - type: str - server_proxy_password: - description: - - Specify a password for HTTP proxy with basic authentication - type: str - auto_attach: - description: - - Upon successful registration, auto-consume available subscriptions - - | - Please note that the alias O(ignore:autosubscribe) was removed in - community.general 9.0.0. - type: bool - activationkey: - description: - - supply an activation key for use with registration - type: str - org_id: - description: - - Organization ID to use in conjunction with activationkey - type: str - environment: - description: - - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello - type: str - pool_ids: - description: - - | - Specify subscription pool IDs to consume. - A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), - or as a C(dict) with the pool ID as the key, and a quantity as the value (for example - V(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple - entitlements from a pool (the pool must support this). - default: [] + addons: + description: Syspurpose attribute addons. 
type: list - elements: raw - consumer_type: + elements: str + sync: description: - - The type of unit to register, defaults to system - type: str - consumer_name: - description: - - Name of the system to register, defaults to the hostname - type: str - consumer_id: - description: - - | - References an existing consumer ID to resume using a previous registration - for this system. If the system's identity certificate is lost or corrupted, - this option allows it to resume using its previous identity and subscriptions. - The default is to not specify a consumer ID so a new ID is created. - type: str - force_register: - description: - - Register the system even if it is already registered + - When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this option is V(false), + then syspurpose attributes will be synchronized with RHSM server by rhsmcertd daemon. type: bool default: false - release: - description: - - Set a release version - type: str - syspurpose: - description: - - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) - and synchronize these attributes with RHSM server. Syspurpose attributes help attach - the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file - already contains some attributes, then new attributes overwrite existing attributes. - When some attribute is not listed in the new list of attributes, the existing - attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. 
- type: dict - suboptions: - usage: - description: Syspurpose attribute usage - type: str - role: - description: Syspurpose attribute role - type: str - service_level_agreement: - description: Syspurpose attribute service_level_agreement - type: str - addons: - description: Syspurpose attribute addons - type: list - elements: str - sync: - description: - - When this option is V(true), then syspurpose attributes are synchronized with - RHSM server immediately. When this option is V(false), then syspurpose attributes - will be synchronized with RHSM server by rhsmcertd daemon. - type: bool - default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. community.general.redhat_subscription: state: present @@ -275,17 +264,15 @@ EXAMPLES = ''' - addon1 - addon2 sync: true -''' +""" -RETURN = ''' +RETURN = r""" subscribed_pool_ids: - description: List of pool IDs to which system is now subscribed - returned: success - type: dict - sample: { - "8a85f9815ab905d3015ab928c7005de4": "1" - } -''' + description: List of pool IDs to which system is now subscribed. + returned: success + type: dict + sample: {"8a85f9815ab905d3015ab928c7005de4": "1"} +""" from os.path import isfile from os import getuid, unlink diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py index a30b89922c..716f5f1851 100644 --- a/plugins/modules/redis.py +++ b/plugins/modules/redis.py @@ -8,91 +8,85 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis short_description: Various redis commands, replica and flush description: - - Unified utility to interact with redis instances. + - Unified utility to interact with redis instances. 
extends_documentation_fragment: - - community.general.redis - - community.general.attributes + - community.general.redis + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - command: - description: - - The selected redis command - - V(config) ensures a configuration setting on an instance. - - V(flush) flushes all the instance or a specified db. - - V(replica) sets a redis instance in replica or master mode. (V(slave) is an alias for V(replica).) - choices: [ config, flush, replica, slave ] - type: str - tls: - default: false - version_added: 4.6.0 - login_user: - version_added: 4.6.0 - validate_certs: - version_added: 4.6.0 - ca_certs: - version_added: 4.6.0 - master_host: - description: - - The host of the master instance [replica command] - type: str - master_port: - description: - - The port of the master instance [replica command] - type: int - replica_mode: - description: - - The mode of the redis instance [replica command] - - V(slave) is an alias for V(replica). - default: replica - choices: [ master, replica, slave ] - type: str - aliases: - - slave_mode - db: - description: - - The database to flush (used in db mode) [flush command] - type: int - flush_mode: - description: - - Type of flush (all the dbs in a redis instance or a specific one) - [flush command] - default: all - choices: [ all, db ] - type: str - name: - description: - - A redis config key. - type: str - value: - description: - - A redis config value. When memory size is needed, it is possible - to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024. - Units are case insensitive i.e. 1m = 1mb = 1M = 1MB. - type: str + command: + description: + - The selected redis command. + - V(config) ensures a configuration setting on an instance. + - V(flush) flushes all the instance or a specified db. + - V(replica) sets a redis instance in replica or master mode. 
(V(slave) is an alias for V(replica)). + choices: [config, flush, replica, slave] + type: str + tls: + default: false + version_added: 4.6.0 + login_user: + version_added: 4.6.0 + validate_certs: + version_added: 4.6.0 + ca_certs: + version_added: 4.6.0 + master_host: + description: + - The host of the master instance [replica command]. + type: str + master_port: + description: + - The port of the master instance [replica command]. + type: int + replica_mode: + description: + - The mode of the redis instance [replica command]. + - V(slave) is an alias for V(replica). + default: replica + choices: [master, replica, slave] + type: str + aliases: + - slave_mode + db: + description: + - The database to flush (used in DB mode) [flush command]. + type: int + flush_mode: + description: + - Type of flush (all the DBs in a redis instance or a specific one) [flush command]. + default: all + choices: [all, db] + type: str + name: + description: + - A redis config key. + type: str + value: + description: + - A redis config value. When memory size is needed, it is possible to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024. + Units are case insensitive, in other words 1m = 1mb = 1M = 1MB. + type: str notes: - - Requires the redis-py Python package on the remote host. You can - install it with pip (pip install redis) or with a package manager. - https://github.com/andymccurdy/redis-py - - If the redis master instance we are making replica of is password protected - this needs to be in the redis.conf in the masterauth variable - + - Requires the C(redis-py) Python package on the remote host. You can install it with pip + (C(pip install redis)) or with a package manager. U(https://github.com/andymccurdy/redis-py). + - If the redis master instance you are making replica of is password protected this needs to be in the C(redis.conf) in the C(masterauth) variable. 
seealso: - - module: community.general.redis_info -requirements: [ redis ] + - module: community.general.redis_info +requirements: [redis] author: "Xabier Larrakoetxea (@slok)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set local redis instance to be a replica of melee.island on port 6377 community.general.redis: command: replica @@ -142,7 +136,7 @@ EXAMPLES = ''' ca_certs: /etc/redis/certs/ca.crt client_cert_file: /etc/redis/certs/redis.crt client_key_file: /etc/redis/certs/redis.key -''' +""" import traceback diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py index fe5cc07ef9..03ae78dce3 100644 --- a/plugins/modules/redis_data.py +++ b/plugins/modules/redis_data.py @@ -8,71 +8,69 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data short_description: Set key value pairs in Redis version_added: 3.7.0 description: - - Set key value pairs in Redis database. + - Set key value pairs in Redis database. author: "Andreas Botzner (@paginabianca)" attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - key: - description: - - Database key. - required: true - type: str - value: - description: - - Value that key should be set to. - required: false - type: str - expiration: - description: - - Expiration time in milliseconds. - Setting this flag will always result in a change in the database. - required: false - type: int - non_existing: - description: - - Only set key if it does not already exist. - required: false - type: bool - existing: - description: - - Only set key if it already exists. - required: false - type: bool - keep_ttl: - description: - - Retain the time to live associated with the key. - required: false - type: bool - state: - description: - - State of the key. 
- default: present - type: str - choices: - - present - - absent + key: + description: + - Database key. + required: true + type: str + value: + description: + - Value that key should be set to. + required: false + type: str + expiration: + description: + - Expiration time in milliseconds. Setting this flag will always result in a change in the database. + required: false + type: int + non_existing: + description: + - Only set key if it does not already exist. + required: false + type: bool + existing: + description: + - Only set key if it already exists. + required: false + type: bool + keep_ttl: + description: + - Retain the time to live associated with the key. + required: false + type: bool + state: + description: + - State of the key. + default: present + type: str + choices: + - present + - absent extends_documentation_fragment: - community.general.redis.documentation - community.general.attributes seealso: - - module: community.general.redis_data_incr - - module: community.general.redis_data_info - - module: community.general.redis -''' + - module: community.general.redis_data_incr + - module: community.general.redis_data_info + - module: community.general.redis +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set key foo=bar on localhost with no username community.general.redis_data: login_host: localhost @@ -116,9 +114,9 @@ EXAMPLES = ''' login_password: supersecret key: foo state: absent -''' +""" -RETURN = ''' +RETURN = r""" old_value: description: Value of key before setting. returned: on_success if O(state=present) and key exists in database. 
@@ -134,7 +132,7 @@ msg: returned: always type: str sample: 'Set key: foo to bar' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py index b359e0cb94..0f0aa92974 100644 --- a/plugins/modules/redis_data_incr.py +++ b/plugins/modules/redis_data_incr.py @@ -8,24 +8,22 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data_incr short_description: Increment keys in Redis version_added: 4.0.0 description: - - Increment integers or float keys in Redis database and get new value. - - Default increment for all keys is 1. For specific increments use the - O(increment_int) and O(increment_float) options. + - Increment integers or float keys in Redis database and get new value. + - Default increment for all keys is V(1). For specific increments use the O(increment_int) and O(increment_float) options. author: "Andreas Botzner (@paginabianca)" attributes: check_mode: support: partial details: - - For C(check_mode) to work, the specified O(login_user) needs permission to - run the C(GET) command on the key, otherwise the module will fail. - - When using C(check_mode) the module will try to calculate the value that - Redis would return. If the key is not present, 0.0 is used as value. + - For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise the module will + fail. + - When using C(check_mode) the module will try to calculate the value that Redis would return. If the key is not present, V(0.0) is used as + value. diff_mode: support: none options: @@ -42,8 +40,7 @@ options: increment_float: description: - Float amount to increment the key by. - - This only works with keys that contain float values - in their string representation. 
+ - This only works with keys that contain float values in their string representation. type: float required: false @@ -53,12 +50,12 @@ extends_documentation_fragment: - community.general.attributes seealso: - - module: community.general.redis_data - - module: community.general.redis_data_info - - module: community.general.redis -''' + - module: community.general.redis_data + - module: community.general.redis_data_info + - module: community.general.redis +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Increment integer key foo on localhost with no username and print new value community.general.redis_data_incr: login_host: localhost @@ -77,11 +74,11 @@ EXAMPLES = ''' login_password: somepass key: foo increment_float: '20.4' -''' +""" -RETURN = ''' +RETURN = r""" value: - description: Incremented value of key + description: Incremented value of key. returned: on success type: float sample: '4039.4' @@ -90,7 +87,7 @@ msg: returned: always type: str sample: 'Incremented key: foo by 20.4 to 65.9' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py index c0af619057..48be45a92f 100644 --- a/plugins/modules/redis_data_info.py +++ b/plugins/modules/redis_data_info.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data_info short_description: Get value of key in Redis database version_added: 3.7.0 @@ -33,9 +32,9 @@ seealso: - module: community.general.redis_data_incr - module: community.general.redis_info - module: community.general.redis -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get key foo=bar from loalhost with no username community.general.redis_data_info: login_host: localhost @@ -50,9 +49,9 @@ EXAMPLES = ''' validate_certs: true ssl_ca_certs: /path/to/ca/certs key: foo -''' 
+""" -RETURN = ''' +RETURN = r""" exists: description: If they key exists in the database. returned: on success @@ -67,7 +66,7 @@ msg: returned: always type: str sample: 'Got key: foo with value: bar' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py index c75abcf212..bc43f9251e 100644 --- a/plugins/modules/redis_info.py +++ b/plugins/modules/redis_info.py @@ -9,17 +9,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: redis_info short_description: Gather information about Redis servers version_added: '0.2.0' description: -- Gathers information and statistics about Redis servers. + - Gathers information and statistics about Redis servers. extends_documentation_fragment: -- community.general.redis -- community.general.attributes -- community.general.attributes.info_module + - community.general.redis + - community.general.attributes + - community.general.attributes.info_module options: login_user: version_added: 7.5.0 @@ -36,11 +35,11 @@ options: type: bool version_added: 9.1.0 seealso: -- module: community.general.redis + - module: community.general.redis author: "Pavlo Bashynskyi (@levonet)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get server information community.general.redis_info: register: result @@ -57,9 +56,9 @@ EXAMPLES = r''' - name: Print server cluster information ansible.builtin.debug: var: result.cluster_info -''' +""" -RETURN = r''' +RETURN = r""" info: description: The default set of server information sections U(https://redis.io/commands/info). 
returned: success @@ -211,7 +210,7 @@ cluster: "cluster_stats_messages_received": 1483968, "total_cluster_links_buffer_limit_exceeded": 0 } -''' +""" import traceback diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py index 7f23009972..4d0a810108 100644 --- a/plugins/modules/rhevm.py +++ b/plugins/modules/rhevm.py @@ -8,151 +8,150 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: rhevm short_description: RHEV/oVirt automation description: - - This module only supports oVirt/RHEV version 3. - - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. - - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. + - This module only supports oVirt/RHEV version 3. + - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. requirements: - - ovirtsdk + - ovirtsdk author: - - Timothy Vandenbrande (@TimothyVandenbrande) + - Timothy Vandenbrande (@TimothyVandenbrande) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - user: - description: - - The user to authenticate with. - type: str - default: admin@internal - password: - description: - - The password for user authentication. - type: str - required: true - server: - description: - - The name/IP of your RHEV-m/oVirt instance. - type: str - default: 127.0.0.1 - port: - description: - - The port on which the API is reachable. - type: int - default: 443 - insecure_api: - description: - - A boolean switch to make a secure or insecure connection to the server. - type: bool - default: false - name: - description: - - The name of the VM. 
- type: str - cluster: - description: - - The RHEV/oVirt cluster in which you want you VM to start. - type: str - default: '' - datacenter: - description: - - The RHEV/oVirt datacenter in which you want you VM to start. - type: str - default: Default - state: - description: - - This serves to create/remove/update or powermanage your VM. - type: str - choices: [ absent, cd, down, info, ping, present, restarted, up ] - default: present - image: - description: - - The template to use for the VM. - type: str - type: - description: - - To define if the VM is a server or desktop. - type: str - choices: [ desktop, host, server ] - default: server - vmhost: - description: - - The host you wish your VM to run on. - type: str - vmcpu: - description: - - The number of CPUs you want in your VM. - type: int - default: 2 - cpu_share: - description: - - This parameter is used to configure the CPU share. - type: int - default: 0 - vmmem: - description: - - The amount of memory you want your VM to use (in GB). - type: int - default: 1 - osver: - description: - - The operating system option in RHEV/oVirt. - type: str - default: rhel_6x64 - mempol: - description: - - The minimum amount of memory you wish to reserve for this system. - type: int - default: 1 - vm_ha: - description: - - To make your VM High Available. - type: bool - default: true - disks: - description: - - This option uses complex arguments and is a list of disks with the options name, size and domain. - type: list - elements: str - ifaces: - description: - - This option uses complex arguments and is a list of interfaces with the options name and vlan. - type: list - elements: str - aliases: [ interfaces, nics ] - boot_order: - description: - - This option uses complex arguments and is a list of items that specify the bootorder. - type: list - elements: str - default: [ hd, network ] - del_prot: - description: - - This option sets the delete protection checkbox. 
- type: bool - default: true - cd_drive: - description: - - The CD you wish to have mounted on the VM when O(state=cd). - type: str - timeout: - description: - - The timeout you wish to define for power actions. - - When O(state=up). - - When O(state=down). - - When O(state=restarted). - type: int -''' + user: + description: + - The user to authenticate with. + type: str + default: admin@internal + password: + description: + - The password for user authentication. + type: str + required: true + server: + description: + - The name/IP of your RHEV-m/oVirt instance. + type: str + default: 127.0.0.1 + port: + description: + - The port on which the API is reachable. + type: int + default: 443 + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + type: bool + default: false + name: + description: + - The name of the VM. + type: str + cluster: + description: + - The RHEV/oVirt cluster in which you want you VM to start. + type: str + default: '' + datacenter: + description: + - The RHEV/oVirt datacenter in which you want you VM to start. + type: str + default: Default + state: + description: + - This serves to create/remove/update or powermanage your VM. + type: str + choices: [absent, cd, down, info, ping, present, restarted, up] + default: present + image: + description: + - The template to use for the VM. + type: str + type: + description: + - To define if the VM is a server or desktop. + type: str + choices: [desktop, host, server] + default: server + vmhost: + description: + - The host you wish your VM to run on. + type: str + vmcpu: + description: + - The number of CPUs you want in your VM. + type: int + default: 2 + cpu_share: + description: + - This parameter is used to configure the CPU share. + type: int + default: 0 + vmmem: + description: + - The amount of memory you want your VM to use (in GB). + type: int + default: 1 + osver: + description: + - The operating system option in RHEV/oVirt. 
+ type: str + default: rhel_6x64 + mempol: + description: + - The minimum amount of memory you wish to reserve for this system. + type: int + default: 1 + vm_ha: + description: + - To make your VM High Available. + type: bool + default: true + disks: + description: + - This option uses complex arguments and is a list of disks with the options V(name), V(size), and V(domain). + type: list + elements: str + ifaces: + description: + - This option uses complex arguments and is a list of interfaces with the options V(name) and V(vlan). + type: list + elements: str + aliases: [interfaces, nics] + boot_order: + description: + - This option uses complex arguments and is a list of items that specify the bootorder. + type: list + elements: str + default: [hd, network] + del_prot: + description: + - This option sets the delete protection checkbox. + type: bool + default: true + cd_drive: + description: + - The CD you wish to have mounted on the VM when O(state=cd). + type: str + timeout: + description: + - The timeout you wish to define for power actions. + - When O(state=up). + - When O(state=down). + - When O(state=restarted). + type: int +""" -RETURN = r''' +RETURN = r""" vm: description: Returns all of the VMs variables and execution. 
returned: always @@ -216,9 +215,9 @@ vm: "vmhost": "host416", "vmmem": "16" } -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic get info from VM community.general.rhevm: server: rhevm01 @@ -258,33 +257,33 @@ EXAMPLES = r''' vmcpu: 4 vmmem: 2 ifaces: - - name: eth0 - vlan: vlan2202 - - name: eth1 - vlan: vlan36 - - name: eth2 - vlan: vlan38 - - name: eth3 - vlan: vlan2202 + - name: eth0 + vlan: vlan2202 + - name: eth1 + vlan: vlan36 + - name: eth2 + vlan: vlan38 + - name: eth3 + vlan: vlan2202 disks: - - name: root - size: 10 - domain: ssd-san - - name: swap - size: 10 - domain: 15kiscsi-san - - name: opt - size: 10 - domain: 15kiscsi-san - - name: var - size: 10 - domain: 10kiscsi-san - - name: home - size: 10 - domain: sata-san + - name: root + size: 10 + domain: ssd-san + - name: swap + size: 10 + domain: 15kiscsi-san + - name: opt + size: 10 + domain: 15kiscsi-san + - name: var + size: 10 + domain: 10kiscsi-san + - name: home + size: 10 + domain: sata-san boot_order: - - network - - hd + - network + - hd state: present - name: Add a CD to the disk cd_drive @@ -302,33 +301,33 @@ EXAMPLES = r''' type: host cluster: rhevm01 ifaces: - - name: em1 - - name: em2 - - name: p3p1 - ip: 172.31.224.200 - netmask: 255.255.254.0 - - name: p3p2 - ip: 172.31.225.200 - netmask: 255.255.254.0 - - name: bond0 - bond: - - em1 - - em2 - network: rhevm - ip: 172.31.222.200 - netmask: 255.255.255.0 - management: true - - name: bond0.36 - network: vlan36 - ip: 10.2.36.200 - netmask: 255.255.254.0 - gateway: 10.2.36.254 - - name: bond0.2202 - network: vlan2202 - - name: bond0.38 - network: vlan38 + - name: em1 + - name: em2 + - name: p3p1 + ip: 172.31.224.200 + netmask: 255.255.254.0 + - name: p3p2 + ip: 172.31.225.200 + netmask: 255.255.254.0 + - name: bond0 + bond: + - em1 + - em2 + network: rhevm + ip: 172.31.222.200 + netmask: 255.255.255.0 + management: true + - name: bond0.36 + network: vlan36 + ip: 10.2.36.200 + netmask: 255.255.254.0 + gateway: 10.2.36.254 + - name: 
bond0.2202 + network: vlan2202 + - name: bond0.38 + network: vlan38 state: present -''' +""" import time diff --git a/plugins/modules/rhsm_release.py b/plugins/modules/rhsm_release.py index 8c74ca8192..ca3a0d03d9 100644 --- a/plugins/modules/rhsm_release.py +++ b/plugins/modules/rhsm_release.py @@ -8,18 +8,15 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rhsm_release short_description: Set or Unset RHSM Release version description: - Sets or unsets the release version used by RHSM repositories. notes: - - This module will fail on an unregistered system. - Use the M(community.general.redhat_subscription) module to register a system - prior to setting the RHSM release. - - It is possible to interact with C(subscription-manager) only as root, - so root permissions are required to successfully run this module. + - This module will fail on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system prior to setting + the RHSM release. + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully run this module. requirements: - Red Hat Enterprise Linux 6+ with subscription-manager installed extends_documentation_fragment: @@ -37,9 +34,9 @@ options: type: str author: - Sean Myers (@seandst) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Set release version to 7.1 - name: Set RHSM release version community.general.rhsm_release: @@ -53,15 +50,15 @@ EXAMPLES = ''' # Unset release version - name: Unset RHSM release release community.general.rhsm_release: - release: null -''' + release: +""" -RETURN = ''' +RETURN = r""" current_release: - description: The current RHSM release version value + description: The current RHSM release version value. 
returned: success type: str -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py index ed8b0e7d58..3e95e69425 100644 --- a/plugins/modules/rhsm_repository.py +++ b/plugins/modules/rhsm_repository.py @@ -8,20 +8,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rhsm_repository short_description: Manage RHSM repositories using the subscription-manager command description: - - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription - Management entitlement platform using the C(subscription-manager) command. + - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command. author: Giovanni Sciortino (@giovannisciortino) notes: - - In order to manage RHSM repositories the system must be already registered - to RHSM manually or using the Ansible M(community.general.redhat_subscription) module. - - It is possible to interact with C(subscription-manager) only as root, - so root permissions are required to successfully run this module. - + - In order to manage RHSM repositories the system must be already registered to RHSM manually or using the Ansible M(community.general.redhat_subscription) + module. + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully run this module. requirements: - subscription-manager extends_documentation_fragment: @@ -34,31 +30,27 @@ attributes: options: state: description: - - If state is equal to present or disabled, indicates the desired - repository state. - - In community.general 10.0.0 the states V(present) and V(absent) have been - removed. Please use V(enabled) and V(disabled) instead. + - If state is equal to present or disabled, indicates the desired repository state. 
+ - In community.general 10.0.0 the states V(present) and V(absent) have been removed. Please use V(enabled) and V(disabled) instead. choices: [enabled, disabled] default: "enabled" type: str name: description: - The ID of repositories to enable. - - To operate on several repositories this can accept a comma separated - list or a YAML list. + - To operate on several repositories this can accept a comma separated list or a YAML list. required: true type: list elements: str purge: description: - - Disable all currently enabled repositories that are not not specified in O(name). - Only set this to V(true) if passing in a list of repositories to the O(name) field. - Using this with C(loop) will most likely not have the desired result. + - Disable all currently enabled repositories that are not not specified in O(name). Only set this to V(true) if passing in a list of repositories + to the O(name) field. Using this with C(loop) will most likely not have the desired result. type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable a RHSM repository community.general.rhsm_repository: name: rhel-7-server-rpms @@ -77,16 +69,16 @@ EXAMPLES = ''' community.general.rhsm_repository: name: rhel-7-server-rpms purge: true -''' +""" -RETURN = ''' +RETURN = r""" repositories: description: - The list of RHSM repositories with their states. - When this module is used to change the repository states, this list contains the updated states after the changes. 
returned: success type: list -''' +""" import os from fnmatch import fnmatch diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py index 438263da22..cd3a3f4e8f 100644 --- a/plugins/modules/riak.py +++ b/plugins/modules/riak.py @@ -9,18 +9,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: riak short_description: This module handles some common Riak operations description: - - This module can be used to join nodes to a cluster, check - the status of the cluster. + - This module can be used to join nodes to a cluster, check the status of the cluster. author: - - "James Martin (@jsmartin)" - - "Drew Kerrigan (@drewkerrigan)" + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: none @@ -34,17 +32,17 @@ options: type: str config_dir: description: - - The path to the riak configuration directory + - The path to the riak configuration directory. default: /etc/riak type: path http_conn: description: - - The ip address and port that is listening for Riak HTTP queries + - The ip address and port that is listening for Riak HTTP queries. default: 127.0.0.1:8098 type: str target_node: description: - - The target node for certain operations (join, ping) + - The target node for certain operations (join, ping). default: riak@127.0.0.1 type: str wait_for_handoffs: @@ -64,13 +62,12 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. 
type: bool default: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Join's a Riak node to another node" community.general.riak: command: join @@ -83,7 +80,7 @@ EXAMPLES = ''' - name: Wait for riak_kv service to startup community.general.riak: wait_for_service: kv -''' +""" import json import time diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py index 473f0150ab..82de3f829f 100644 --- a/plugins/modules/rocketchat.py +++ b/plugins/modules/rocketchat.py @@ -12,11 +12,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: rocketchat short_description: Send notifications to Rocket Chat description: - - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration + - This module sends notifications to Rocket Chat through the Incoming WebHook integration. author: "Ramon de la Fuente (@ramondelafuente)" extends_documentation_fragment: - community.general.attributes @@ -29,15 +29,12 @@ options: domain: type: str description: - - The domain for your environment without protocol. (For example - V(example.com) or V(chat.example.com).) + - The domain for your environment without protocol. (For example V(example.com) or V(chat.example.com)). required: true token: type: str description: - - Rocket Chat Incoming Webhook integration token. This provides - authentication to Rocket Chat's Incoming webhook for posting - messages. + - Rocket Chat Incoming Webhook integration token. This provides authentication to Rocket Chat's Incoming webhook for posting messages. required: true protocol: type: str @@ -54,8 +51,8 @@ options: channel: type: str description: - - Channel to send the message to. If absent, the message goes to the channel selected for the O(token) - specified during the creation of webhook. + - Channel to send the message to. 
If absent, the message goes to the channel selected for the O(token) specified during the creation of + webhook. username: type: str description: @@ -69,8 +66,7 @@ options: icon_emoji: type: str description: - - Emoji for the message sender. The representation for the available emojis can be - got from Rocket Chat. + - Emoji for the message sender. The representation for the available emojis can be got from Rocket Chat. - For example V(:thumbsup:). - If O(icon_emoji) is set, O(icon_url) will not be used. link_names: @@ -83,14 +79,13 @@ options: - 0 validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. type: bool default: true color: type: str description: - - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the message. default: 'normal' choices: - 'normal' @@ -102,17 +97,17 @@ options: elements: dict description: - Define a list of attachments. 
-''' +""" -EXAMPLES = """ -- name: Send notification message via Rocket Chat +EXAMPLES = r""" +- name: Send notification message through Rocket Chat community.general.rocketchat: token: thetoken/generatedby/rocketchat domain: chat.example.com msg: '{{ inventory_hostname }} completed' delegate_to: localhost -- name: Send notification message via Rocket Chat all options +- name: Send notification message through Rocket Chat all options community.general.rocketchat: domain: chat.example.com token: thetoken/generatedby/rocketchat @@ -151,12 +146,12 @@ EXAMPLES = """ delegate_to: localhost """ -RETURN = """ +RETURN = r""" changed: - description: A flag indicating if any change was made or not. - returned: success - type: bool - sample: false + description: A flag indicating if any change was made or not. + returned: success + type: bool + sample: false """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py index 4bce9ab980..e9bfc239b0 100644 --- a/plugins/modules/rollbar_deployment.py +++ b/plugins/modules/rollbar_deployment.py @@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rollbar_deployment author: "Max Riveiro (@kavu)" short_description: Notify Rollbar about app deployments description: - - Notify Rollbar about app deployments - (see https://rollbar.com/docs/deploys_other/) + - Notify Rollbar about app deployments (see U(https://rollbar.com/docs/deploys_other/)). extends_documentation_fragment: - community.general.attributes attributes: @@ -33,7 +31,7 @@ options: environment: type: str description: - - Name of the environment being deployed, e.g. 'production'. + - Name of the environment being deployed, for example V(production). required: true revision: type: str @@ -53,7 +51,7 @@ options: comment: type: str description: - - Deploy comment (e.g. 
what is being deployed). + - Deploy comment (for example what is being deployed). required: false url: type: str @@ -63,31 +61,30 @@ options: default: 'https://api.rollbar.com/api/1/deploy/' validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. - This should only be used on personally controlled sites using + - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: true type: bool -''' +""" -EXAMPLES = ''' - - name: Rollbar deployment notification - community.general.rollbar_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: '4.2' - rollbar_user: admin - comment: Test Deploy +EXAMPLES = r""" +- name: Rollbar deployment notification + community.general.rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy - - name: Notify rollbar about current git revision deployment by current user - community.general.rollbar_deployment: - token: "{{ rollbar_access_token }}" - environment: production - revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" - user: "{{ lookup('env', 'USER') }}" -''' +- name: Notify rollbar about current git revision deployment by current user + community.general.rollbar_deployment: + token: "{{ rollbar_access_token }}" + environment: production + revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" + user: "{{ lookup('env', 'USER') }}" +""" import traceback from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py index 8c5c693f60..3d5ba865c9 100644 --- a/plugins/modules/rpm_ostree_pkg.py +++ b/plugins/modules/rpm_ostree_pkg.py @@ -10,50 +10,49 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" 
module: rpm_ostree_pkg short_description: Install or uninstall overlay additional packages version_added: "2.0.0" description: - - Install or uninstall overlay additional packages using C(rpm-ostree) command. + - Install or uninstall overlay additional packages using C(rpm-ostree) command. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: + name: + description: - Name of overlay package to install or remove. - required: true - type: list - elements: str - aliases: [ pkg ] - state: - description: + required: true + type: list + elements: str + aliases: [pkg] + state: + description: - State of the overlay package. - V(present) simply ensures that a desired package is installed. - V(absent) removes the specified package. - choices: [ 'absent', 'present' ] - default: 'present' - type: str - apply_live: - description: + choices: ['absent', 'present'] + default: 'present' + type: str + apply_live: + description: - Adds the options C(--apply-live) when O(state=present). - Option is ignored when O(state=absent). - For more information, please see U(https://coreos.github.io/rpm-ostree/apply-live/). - type: bool - default: false - version_added: 10.1.0 + type: bool + default: false + version_added: 10.1.0 author: - - Dusty Mabe (@dustymabe) - - Abhijeet Kasurde (@Akasurde) -''' + - Dusty Mabe (@dustymabe) + - Abhijeet Kasurde (@Akasurde) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install overlay package community.general.rpm_ostree_pkg: name: nfs-utils @@ -80,51 +79,51 @@ EXAMPLES = r''' until: rpm_ostree_pkg is not failed retries: 10 dealy: 30 -''' +""" -RETURN = r''' +RETURN = r""" rc: - description: Return code of rpm-ostree command. - returned: always - type: int - sample: 0 + description: Return code of rpm-ostree command. 
+ returned: always + type: int + sample: 0 changed: - description: State changes. - returned: always - type: bool - sample: true + description: State changes. + returned: always + type: bool + sample: true action: - description: Action performed. - returned: always - type: str - sample: 'install' + description: Action performed. + returned: always + type: str + sample: 'install' packages: - description: A list of packages specified. - returned: always - type: list - sample: ['nfs-utils'] + description: A list of packages specified. + returned: always + type: list + sample: ['nfs-utils'] stdout: - description: Stdout of rpm-ostree command. - returned: always - type: str - sample: 'Staging deployment...done\n...' + description: Stdout of rpm-ostree command. + returned: always + type: str + sample: 'Staging deployment...done\n...' stderr: - description: Stderr of rpm-ostree command. - returned: always - type: str - sample: '' + description: Stderr of rpm-ostree command. + returned: always + type: str + sample: '' cmd: - description: Full command used for performed action. - returned: always - type: str - sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' + description: Full command used for performed action. + returned: always + type: str + sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' needs_reboot: - description: Determine if machine needs a reboot to apply current changes. - returned: success - type: bool - sample: true - version_added: 10.1.0 -''' + description: Determine if machine needs a reboot to apply current changes. 
+ returned: success + type: bool + sample: true + version_added: 10.1.0 +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py index 8f21a32680..29b31a6642 100644 --- a/plugins/modules/rundeck_acl_policy.py +++ b/plugins/modules/rundeck_acl_policy.py @@ -11,71 +11,70 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rundeck_acl_policy short_description: Manage Rundeck ACL policies description: - - Create, update and remove Rundeck ACL policies through HTTP API. + - Create, update and remove Rundeck ACL policies through HTTP API. author: "Loic Blot (@nerzhul)" attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - type: str - description: - - Create or remove Rundeck project. - choices: ['present', 'absent'] - default: 'present' - name: - type: str - description: - - Sets the project name. - required: true - api_token: - description: - - Sets the token to authenticate against Rundeck API. - aliases: ["token"] - project: - type: str - description: - - Sets the project which receive the ACL policy. - - If unset, it's a system ACL policy. - policy: - type: str - description: - - Sets the ACL policy content. - - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html. - - It can be a YAML string or a pure Ansible inventory YAML object. 
- client_cert: - version_added: '0.2.0' - client_key: - version_added: '0.2.0' - force: - version_added: '0.2.0' - force_basic_auth: - version_added: '0.2.0' - http_agent: - version_added: '0.2.0' - url_password: - version_added: '0.2.0' - url_username: - version_added: '0.2.0' - use_proxy: - version_added: '0.2.0' - validate_certs: - version_added: '0.2.0' + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. + aliases: ["token"] + project: + type: str + description: + - Sets the project which receive the ACL policy. + - If unset, it's a system ACL policy. + policy: + type: str + description: + - Sets the ACL policy content. + - ACL policy content is a YAML object as described in U(http://rundeck.org/docs/man5/aclpolicy.html). + - It can be a YAML string or a pure Ansible inventory YAML object. + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' extends_documentation_fragment: - ansible.builtin.url - community.general.attributes - community.general.rundeck -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create or update a rundeck ACL policy in project Ansible community.general.rundeck_acl_policy: name: "Project_01" @@ -100,22 +99,22 @@ EXAMPLES = ''' url: "https://rundeck.example.org" token: "mytoken" state: absent -''' +""" -RETURN = ''' +RETURN = r""" rundeck_response: - description: Rundeck response when a failure occurs. - returned: failed - type: str + description: Rundeck response when a failure occurs. 
+ returned: failed + type: str before: - description: Dictionary containing ACL policy information before modification. - returned: success - type: dict + description: Dictionary containing ACL policy information before modification. + returned: success + type: dict after: - description: Dictionary containing ACL policy information after modification. - returned: success - type: dict -''' + description: Dictionary containing ACL policy information after modification. + returned: success + type: dict +""" # import module snippets import re diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py index 818bde83c0..540c8c7788 100644 --- a/plugins/modules/rundeck_job_executions_info.py +++ b/plugins/modules/rundeck_job_executions_info.py @@ -9,43 +9,42 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rundeck_job_executions_info short_description: Query executions for a Rundeck job description: - - This module gets the list of executions for a specified Rundeck job. + - This module gets the list of executions for a specified Rundeck job. author: "Phillipe Smith (@phsmith)" version_added: 3.8.0 options: - job_id: - type: str - description: - - The job unique ID. - required: true - status: - type: str - description: - - The job status to filter. - choices: [succeeded, failed, aborted, running] - max: - type: int - description: - - Max results to return. - default: 20 - offset: - type: int - description: - - The start point to return the results. - default: 0 + job_id: + type: str + description: + - The job unique ID. + required: true + status: + type: str + description: + - The job status to filter. + choices: [succeeded, failed, aborted, running] + max: + type: int + description: + - Max results to return. + default: 20 + offset: + type: int + description: + - The start point to return the results. 
+ default: 0 extends_documentation_fragment: - community.general.rundeck - url - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get Rundeck job executions info community.general.rundeck_job_executions_info: url: "https://rundeck.example.org" @@ -57,36 +56,31 @@ EXAMPLES = ''' - name: Show Rundeck job executions info ansible.builtin.debug: var: rundeck_job_executions_info.executions -''' +""" -RETURN = ''' +RETURN = r""" paging: - description: Results pagination info. - returned: success - type: dict - contains: - count: - description: Number of results in the response. - type: int - returned: success - total: - description: Total number of results. - type: int - returned: success - offset: - description: Offset from first of all results. - type: int - returned: success - max: - description: Maximum number of results per page. - type: int - returned: success - sample: { - "count": 20, - "total": 100, - "offset": 0, - "max": 20 - } + description: Results pagination info. + returned: success + type: dict + contains: + count: + description: Number of results in the response. + type: int + returned: success + total: + description: Total number of results. + type: int + returned: success + offset: + description: Offset from first of all results. + type: int + returned: success + max: + description: Maximum number of results per page. + type: int + returned: success + sample: {"count": 20, "total": 100, "offset": 0, "max": 20} executions: description: Job executions list. 
returned: always @@ -127,7 +121,7 @@ executions: "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import quote diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py index 2ef1447401..f46b5ee432 100644 --- a/plugins/modules/rundeck_job_run.py +++ b/plugins/modules/rundeck_job_run.py @@ -9,75 +9,74 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rundeck_job_run short_description: Run a Rundeck job description: - - This module runs a Rundeck job specified by ID. + - This module runs a Rundeck job specified by ID. author: "Phillipe Smith (@phsmith)" version_added: 3.8.0 attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - job_id: - type: str - description: - - The job unique ID. - required: true - job_options: - type: dict - description: - - The job options for the steps. - - Numeric values must be quoted. - filter_nodes: - type: str - description: - - Filter the nodes where the jobs must run. - - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). - run_at_time: - type: str - description: - - Schedule the job execution to run at specific date and time. - - ISO-8601 date and time format like V(2021-10-05T15:45:00-03:00). - loglevel: - type: str - description: - - Log level configuration. - choices: [debug, verbose, info, warn, error] - default: info - wait_execution: - type: bool - description: - - Wait until the job finished the execution. - default: true - wait_execution_delay: - type: int - description: - - Delay, in seconds, between job execution status check requests. - default: 5 - wait_execution_timeout: - type: int - description: - - Job execution wait timeout in seconds. 
- - If the timeout is reached, the job will be aborted. - - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check. - default: 120 - abort_on_timeout: - type: bool - description: - - Send a job abort request if exceeded the O(wait_execution_timeout) specified. - default: false + job_id: + type: str + description: + - The job unique ID. + required: true + job_options: + type: dict + description: + - The job options for the steps. + - Numeric values must be quoted. + filter_nodes: + type: str + description: + - Filter the nodes where the jobs must run. + - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). + run_at_time: + type: str + description: + - Schedule the job execution to run at specific date and time. + - ISO-8601 date and time format like V(2021-10-05T15:45:00-03:00). + loglevel: + type: str + description: + - Log level configuration. + choices: [debug, verbose, info, warn, error] + default: info + wait_execution: + type: bool + description: + - Wait until the job finished the execution. + default: true + wait_execution_delay: + type: int + description: + - Delay, in seconds, between job execution status check requests. + default: 5 + wait_execution_timeout: + type: int + description: + - Job execution wait timeout in seconds. + - If the timeout is reached, the job will be aborted. + - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check. + default: 120 + abort_on_timeout: + type: bool + description: + - Send a job abort request if exceeded the O(wait_execution_timeout) specified. 
+ default: false extends_documentation_fragment: - community.general.rundeck - ansible.builtin.url - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Run a Rundeck job community.general.rundeck_job_run: url: "https://rundeck.example.org" @@ -97,9 +96,9 @@ EXAMPLES = ''' api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" job_options: - option_1: "value_1" - option_2: "value_3" - option_3: "value_3" + option_1: "value_1" + option_2: "value_3" + option_3: "value_3" register: rundeck_job_run - name: Run a Rundeck job with timeout, delay between status check and abort on timeout @@ -130,9 +129,9 @@ EXAMPLES = ''' job_id: "xxxxxxxxxxxxxxxxx" wait_execution: false register: rundeck_job_run -''' +""" -RETURN = ''' +RETURN = r""" execution_info: description: Rundeck job execution metadata. returned: always @@ -177,7 +176,7 @@ execution_info: "output": "Test!" } } -''' +""" # Modules import from datetime import datetime, timedelta diff --git a/plugins/modules/rundeck_project.py b/plugins/modules/rundeck_project.py index 79ca575684..0cb6010346 100644 --- a/plugins/modules/rundeck_project.py +++ b/plugins/modules/rundeck_project.py @@ -13,60 +13,59 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rundeck_project short_description: Manage Rundeck projects description: - - Create and remove Rundeck projects through HTTP API. + - Create and remove Rundeck projects through HTTP API. author: "Loic Blot (@nerzhul)" attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - type: str - description: - - Create or remove Rundeck project. - choices: ['present', 'absent'] - default: 'present' - name: - type: str - description: - - Sets the project name. - required: true - api_token: - description: - - Sets the token to authenticate against Rundeck API. 
- aliases: ["token"] - client_cert: - version_added: '0.2.0' - client_key: - version_added: '0.2.0' - force: - version_added: '0.2.0' - force_basic_auth: - version_added: '0.2.0' - http_agent: - version_added: '0.2.0' - url_password: - version_added: '0.2.0' - url_username: - version_added: '0.2.0' - use_proxy: - version_added: '0.2.0' - validate_certs: - version_added: '0.2.0' + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. + aliases: ["token"] + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' extends_documentation_fragment: - ansible.builtin.url - community.general.attributes - community.general.rundeck -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a rundeck project community.general.rundeck_project: name: "Project_01" @@ -83,22 +82,22 @@ EXAMPLES = ''' url: "https://rundeck.example.org" api_token: "mytoken" state: absent -''' +""" -RETURN = ''' +RETURN = r""" rundeck_response: - description: Rundeck response when a failure occurs - returned: failed - type: str + description: Rundeck response when a failure occurs. + returned: failed + type: str before: - description: dictionary containing project information before modification - returned: success - type: dict + description: Dictionary containing project information before modification. 
+ returned: success + type: dict after: - description: dictionary containing project information after modification - returned: success - type: dict -''' + description: Dictionary containing project information after modification. + returned: success + type: dict +""" # import module snippets from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py index 2f1609ca6e..221b87b0dd 100644 --- a/plugins/modules/runit.py +++ b/plugins/modules/runit.py @@ -8,54 +8,50 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: runit author: - - James Sumners (@jsumners) + - James Sumners (@jsumners) short_description: Manage runit services description: - - Controls runit services on remote hosts using the sv utility. + - Controls runit services on remote hosts using the sv utility. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the service to manage. - type: str - required: true - state: - description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. V(restarted) will always bounce the - service (sv restart) and V(killed) will always bounce the service (sv force-stop). - V(reloaded) will send a HUP (sv reload). - V(once) will run a normally downed sv once (sv once), not really - an idempotent operation. - type: str - choices: [ killed, once, reloaded, restarted, started, stopped ] - enabled: - description: - - Whether the service is enabled or not, if disabled it also implies stopped. 
- type: bool - service_dir: - description: - - directory runsv watches for services - type: str - default: /var/service - service_src: - description: - - directory where services are defined, the source of symlinks to service_dir. - type: str - default: /etc/sv -''' + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the service + (sv restart) and V(killed) will always bounce the service (sv force-stop). V(reloaded) will send a HUP (sv reload). V(once) will run a + normally downed sv once (sv once), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. + type: bool + service_dir: + description: + - Directory runsv watches for services. + type: str + default: /var/service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). + type: str + default: /etc/sv +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Start sv dnscache, if not running community.general.runit: name: dnscache @@ -86,7 +82,7 @@ EXAMPLES = r''' name: dnscache state: reloaded service_dir: /run/service -''' +""" import os import re From 23d97fa4b0dc0956e392772617401d08a05d6932 Mon Sep 17 00:00:00 2001 From: Vladimir Botka Date: Wed, 25 Dec 2024 21:41:40 +0100 Subject: [PATCH 407/482] Add inventory plugin iocage (#9262) * Add inventory plugin iocage #9261 * inventory/iocage.py BOTMETA entry added. * Remove missing methods from the test. * Avoid shell mode. * Parameter host is optional default=localhost * Fix AnsibleError calls. 
* Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * iocage_data removed. Not necessarily defined at this point. * Description mentions that remote hosts are contacted via SSH. * test inventory iocage * Create get_jails and get_properties in iocage plugin to simplify testing. * Update test_iocage.py * Add fixtures iocage_* * Update documentation. * Update documentation. * Update documentation. * Fix localhost environment. * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein * Update plugins/inventory/iocage.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/inventory/iocage.py | 266 ++++++++++ .../inventory/fixtures/iocage_inventory.yml | 460 ++++++++++++++++++ .../fixtures/iocage_inventory.yml.license | 3 + .../inventory/fixtures/iocage_jails.txt | 3 + .../fixtures/iocage_jails.txt.license | 3 + .../inventory/fixtures/iocage_jails.yml | 32 ++ .../fixtures/iocage_jails.yml.license | 3 + .../inventory/fixtures/iocage_properties.txt | 11 + .../fixtures/iocage_properties.txt.license | 3 + .../inventory/fixtures/iocage_properties.yml | 458 +++++++++++++++++ .../fixtures/iocage_properties.yml.license | 3 + .../fixtures/iocage_properties_test_101.txt | 141 ++++++ .../iocage_properties_test_101.txt.license | 3 + .../fixtures/iocage_properties_test_102.txt | 141 ++++++ .../iocage_properties_test_102.txt.license | 3 + .../fixtures/iocage_properties_test_103.txt | 141 ++++++ .../iocage_properties_test_103.txt.license | 3 + tests/unit/plugins/inventory/test_iocage.py | 112 +++++ 19 files 
changed, 1791 insertions(+) create mode 100644 plugins/inventory/iocage.py create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_inventory.yml create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_jails.txt create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_jails.yml create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties.txt create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties.yml create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt create mode 100644 tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license create mode 100644 tests/unit/plugins/inventory/test_iocage.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 2be4619ecb..dbe3c9cfe1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -213,6 +213,8 @@ files: maintainers: opoplawski $inventories/gitlab_runners.py: maintainers: morph027 + $inventories/iocage.py: + maintainers: vbotka $inventories/icinga2.py: maintainers: BongoEADGC6 $inventories/linode.py: diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py new file mode 100644 index 0000000000..6b51bb346e --- 
/dev/null +++ b/plugins/inventory/iocage.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: iocage + short_description: iocage inventory source + version_added: 10.2.0 + author: + - Vladimir Botka (@vbotka) + requirements: + - iocage >= 1.8 + description: + - Get inventory hosts from the iocage jail manager running on O(host). + - By default, O(host) is V(localhost). If O(host) is not V(localhost) it + is expected that the user running Ansible on the controller can + connect to the O(host) account O(user) with SSH non-interactively and + execute the command C(iocage list). + - Uses a configuration file as an inventory source, it must end + in C(.iocage.yml) or C(.iocage.yaml). + extends_documentation_fragment: + - ansible.builtin.constructed + - ansible.builtin.inventory_cache + options: + plugin: + description: + - The name of this plugin, it should always be set to + V(community.general.iocage) for this plugin to recognize + it as its own. + required: true + choices: ['community.general.iocage'] + type: str + host: + description: The IP/hostname of the C(iocage) host. + type: str + default: localhost + user: + description: + - C(iocage) user. + It is expected that the O(user) is able to connect to the + O(host) with SSH and execute the command C(iocage list). + This option is not required if O(host) is V(localhost). + type: str + get_properties: + description: + - Get jails' properties. + Creates dictionary C(iocage_properties) for each added host. + type: boolean + default: false + env: + description: O(user)'s environment on O(host). 
+ type: dict + default: {} + notes: + - You might want to test the command C(ssh user@host iocage list -l) on + the controller before using this inventory plugin with O(user) specified + and with O(host) other than V(localhost). + - If you run this inventory plugin on V(localhost) C(ssh) is not used. + In this case, test the command C(iocage list -l). + - This inventory plugin creates variables C(iocage_*) for each added host. + - The values of these variables are collected from the output of the + command C(iocage list -l). + - The names of these variables correspond to the output columns. + - The column C(NAME) is used to name the added host. +''' + +EXAMPLES = ''' +# file name must end with iocage.yaml or iocage.yml +plugin: community.general.iocage +host: 10.1.0.73 +user: admin + +# user is not required if iocage is running on localhost (default) +plugin: community.general.iocage + +# run cryptography without legacy algorithms +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +# enable cache +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true + +# see inventory plugin ansible.builtin.constructed +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true +strict: false +compose: + ansible_host: iocage_ip4 + release: iocage_release | split('-') | first +groups: + test: inventory_hostname.startswith('test') +keyed_groups: + - prefix: distro + key: iocage_release + - prefix: state + key: iocage_state +''' + +import re +import os +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display + +display = Display() + + +def _parse_ip4(ip4): + if ip4 
== '-': + return ip4 + return re.split('\\||/', ip4)[1] + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using iocage as source. ''' + + NAME = 'community.general.iocage' + IOCAGE = '/usr/local/bin/iocage' + + def __init__(self): + super(InventoryModule, self).__init__() + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('iocage.yaml', 'iocage.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + cache_key = self.get_cache_key(path) + + user_cache_setting = self.get_option('cache') + attempt_to_read_cache = user_cache_setting and cache + cache_needs_update = user_cache_setting and not cache + + if attempt_to_read_cache: + try: + results = self._cache[cache_key] + except KeyError: + cache_needs_update = True + if not attempt_to_read_cache or cache_needs_update: + results = self.get_inventory(path) + if cache_needs_update: + self._cache[cache_key] = results + + self.populate(results) + + def get_inventory(self, path): + host = self.get_option('host') + env = self.get_option('env') + get_properties = self.get_option('get_properties') + + cmd = [] + my_env = os.environ.copy() + if host == 'localhost': + my_env.update({str(k): str(v) for k, v in env.items()}) + else: + user = self.get_option('user') + cmd.append("ssh") + cmd.append(f"{user}@{host}") + cmd.extend([f"{k}={v}" for k, v in env.items()]) + cmd.append(self.IOCAGE) + + cmd_list = cmd.copy() + cmd_list.append('list') + cmd_list.append('--header') + cmd_list.append('--long') + try: + p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError('Failed to run 
cmd=%s, rc=%s, stderr=%s' % + (cmd_list, p.returncode, to_native(stderr))) + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError('Invalid (non unicode) input returned: %s' % to_native(e)) from e + + except Exception as e: + raise AnsibleParserError('Failed to parse %s: %s' % + (to_native(path), to_native(e))) from e + + results = {'_meta': {'hostvars': {}}} + self.get_jails(t_stdout, results) + + if get_properties: + for hostname, host_vars in results['_meta']['hostvars'].items(): + cmd_get_properties = cmd.copy() + cmd_get_properties.append("get") + cmd_get_properties.append("--all") + cmd_get_properties.append(f"{hostname}") + try: + p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError('Failed to run cmd=%s, rc=%s, stderr=%s' % + (cmd_get_properties, p.returncode, to_native(stderr))) + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError('Invalid (non unicode) input returned: %s' % to_native(e)) from e + + except Exception as e: + raise AnsibleError('Failed to get properties: %s' % to_native(e)) from e + + self.get_properties(t_stdout, results, hostname) + + return results + + def get_jails(self, t_stdout, results): + jails = [x.split() for x in t_stdout.splitlines()] + for jail in jails: + iocage_name = jail[1] + results['_meta']['hostvars'][iocage_name] = {} + results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] + results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] + results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] + results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] + results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] + results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = _parse_ip4(jail[6]) + results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] 
+ results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] + results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] + + def get_properties(self, t_stdout, results, hostname): + properties = dict([x.split(':', 1) for x in t_stdout.splitlines()]) + results['_meta']['hostvars'][hostname]['iocage_properties'] = properties + + def populate(self, results): + strict = self.get_option('strict') + + for hostname, host_vars in results['_meta']['hostvars'].items(): + self.inventory.add_host(hostname, group='all') + for var, value in host_vars.items(): + self.inventory.set_variable(hostname, var, value) + self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True) + self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict) diff --git a/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml b/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml new file mode 100644 index 0000000000..850a54f549 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml @@ -0,0 +1,460 @@ +all: + children: + test: + hosts: + test_101: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.101 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + 
datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_101 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.101/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_101/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + 
used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_102: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.102 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_102 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.102/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + 
ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_102/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_103: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.103 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + 
allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_103 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.103/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_103/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 
'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail diff --git a/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license b/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.txt b/tests/unit/plugins/inventory/fixtures/iocage_jails.txt new file mode 100644 index 0000000000..5152110550 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_jails.txt @@ -0,0 +1,3 @@ +- test_101 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.101/24 - ansible_client yes +- test_102 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.102/24 - ansible_client yes +- test_103 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.103/24 - ansible_client yes diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license 
b/tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.yml b/tests/unit/plugins/inventory/fixtures/iocage_jails.yml new file mode 100644 index 0000000000..08eaa2dce4 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_jails.yml @@ -0,0 +1,32 @@ +_meta: + hostvars: + test_101: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.101 + iocage_ip6: '-' + iocage_jid: '-' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_102: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.102 + iocage_ip6: '-' + iocage_jid: '-' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_103: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.103 + iocage_ip6: '-' + iocage_jid: '-' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license b/tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.txt 
b/tests/unit/plugins/inventory/fixtures/iocage_properties.txt new file mode 100644 index 0000000000..a24c8959ee --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties.txt @@ -0,0 +1,11 @@ +CONFIG_VERSION:28 +notes:abbridged_properties +allow_chflags:0 +allow_mlock:0 +allow_mount:0 +allow_mount_devfs:0 +allow_mount_fusefs:0 +allow_mount_nullfs:0 +allow_mount_procfs:0 +allow_mount_tmpfs:0 +allow_mount_zfs:0 diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license b/tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.yml b/tests/unit/plugins/inventory/fixtures/iocage_properties.yml new file mode 100644 index 0000000000..ffae1bf9d1 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties.yml @@ -0,0 +1,458 @@ +_meta: + hostvars: + test_101: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.101 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + 
datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_101 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.101/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_101/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + 
used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_102: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.102 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_102 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.102/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + 
ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_102/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_103: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4: 10.1.0.103 + iocage_ip6: '-' + iocage_jid: '-' + iocage_properties: + CONFIG_VERSION: '28' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '0' + allow_mount_devfs: '0' + allow_mount_fusefs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + 
allow_mount_zfs: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '1' + boot: '0' + bpf: '0' + children_max: '0' + comment: none + compression: lz4 + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: 10.1.0.10 + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '0' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: ansible-client + host_hostuuid: test_103 + host_time: '1' + hostid: 34333834-3734-5a43-3331-313342464631 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: vnet0|10.1.0.103/24 + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/test_103/data + jail_zfs_mountpoint: none + last_started: none + localhost_ip: none + login_flags: -f root + mac_prefix: 3e4a92 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: vmm=iocage_01 + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 
'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 13.4-RELEASE-p2 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + stacksize: 'off' + state: down + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: none + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + iocage_release: 13.4-RELEASE-p2 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license b/tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt new file mode 100644 index 0000000000..881f347911 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt @@ -0,0 +1,141 @@ +CONFIG_VERSION:28 +allow_chflags:0 +allow_mlock:0 +allow_mount:0 +allow_mount_devfs:0 +allow_mount_fusefs:0 +allow_mount_nullfs:0 +allow_mount_procfs:0 +allow_mount_tmpfs:0 +allow_mount_zfs:0 +allow_quotas:0 +allow_raw_sockets:0 +allow_set_hostname:1 +allow_socket_af:0 +allow_sysvipc:0 +allow_tun:0 +allow_vmm:0 +assign_localhost:0 
+available:readonly +basejail:1 +boot:0 +bpf:0 +children_max:0 +comment:none +compression:lz4 +compressratio:readonly +coredumpsize:off +count:1 +cpuset:off +cputime:off +datasize:off +dedup:off +defaultrouter:10.1.0.10 +defaultrouter6:auto +depends:none +devfs_ruleset:4 +dhcp:0 +enforce_statfs:2 +exec_clean:1 +exec_created:/usr/bin/true +exec_fib:0 +exec_jail_user:root +exec_poststart:/usr/bin/true +exec_poststop:/usr/bin/true +exec_prestart:/usr/bin/true +exec_prestop:/usr/bin/true +exec_start:/bin/sh /etc/rc +exec_stop:/bin/sh /etc/rc.shutdown +exec_system_jail_user:0 +exec_system_user:root +exec_timeout:60 +host_domainname:none +host_hostname:ansible-client +host_hostuuid:test_101 +host_time:1 +hostid:34333834-3734-5a43-3331-313342464631 +hostid_strict_check:0 +interfaces:vnet0:bridge0 +ip4:new +ip4_addr:vnet0|10.1.0.101/24 +ip4_saddrsel:1 +ip6:new +ip6_addr:none +ip6_saddrsel:1 +ip_hostname:0 +jail_zfs:0 +jail_zfs_dataset:iocage/jails/test_101/data +jail_zfs_mountpoint:none +last_started:none +localhost_ip:none +login_flags:-f root +mac_prefix:3e4a92 +maxproc:off +memorylocked:off +memoryuse:off +min_dyn_devfs_ruleset:1000 +mount_devfs:1 +mount_fdescfs:1 +mount_linprocfs:0 +mount_procfs:0 +mountpoint:readonly +msgqqueued:off +msgqsize:off +nat:0 +nat_backend:ipfw +nat_forwards:none +nat_interface:none +nat_prefix:172.16 +nmsgq:off +notes:vmm=iocage_01 +nsem:off +nsemop:off +nshm:off +nthr:off +openfiles:off +origin:readonly +owner:root +pcpu:off +plugin_name:none +plugin_repository:none +priority:99 +pseudoterminals:off +quota:none +readbps:off +readiops:off +release:13.4-RELEASE-p2 +reservation:none +resolver:/etc/resolv.conf +rlimits:off +rtsold:0 +securelevel:2 +shmsize:off +stacksize:off +state:down +stop_timeout:30 +swapuse:off +sync_state:none +sync_target:none +sync_tgt_zpool:none +sysvmsg:new +sysvsem:new +sysvshm:new +template:0 +type:jail +used:readonly +vmemoryuse:off +vnet:1 +vnet0_mac:none +vnet0_mtu:auto +vnet1_mac:none +vnet1_mtu:auto 
+vnet2_mac:none +vnet2_mtu:auto +vnet3_mac:none +vnet3_mtu:auto +vnet_default_interface:auto +vnet_default_mtu:1500 +vnet_interfaces:none +wallclock:off +writebps:off +writeiops:off diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt new file mode 100644 index 0000000000..065c777b9c --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt @@ -0,0 +1,141 @@ +CONFIG_VERSION:28 +allow_chflags:0 +allow_mlock:0 +allow_mount:0 +allow_mount_devfs:0 +allow_mount_fusefs:0 +allow_mount_nullfs:0 +allow_mount_procfs:0 +allow_mount_tmpfs:0 +allow_mount_zfs:0 +allow_quotas:0 +allow_raw_sockets:0 +allow_set_hostname:1 +allow_socket_af:0 +allow_sysvipc:0 +allow_tun:0 +allow_vmm:0 +assign_localhost:0 +available:readonly +basejail:1 +boot:0 +bpf:0 +children_max:0 +comment:none +compression:lz4 +compressratio:readonly +coredumpsize:off +count:1 +cpuset:off +cputime:off +datasize:off +dedup:off +defaultrouter:10.1.0.10 +defaultrouter6:auto +depends:none +devfs_ruleset:4 +dhcp:0 +enforce_statfs:2 +exec_clean:1 +exec_created:/usr/bin/true +exec_fib:0 +exec_jail_user:root +exec_poststart:/usr/bin/true +exec_poststop:/usr/bin/true +exec_prestart:/usr/bin/true +exec_prestop:/usr/bin/true +exec_start:/bin/sh /etc/rc +exec_stop:/bin/sh /etc/rc.shutdown +exec_system_jail_user:0 +exec_system_user:root +exec_timeout:60 +host_domainname:none 
+host_hostname:ansible-client +host_hostuuid:test_102 +host_time:1 +hostid:34333834-3734-5a43-3331-313342464631 +hostid_strict_check:0 +interfaces:vnet0:bridge0 +ip4:new +ip4_addr:vnet0|10.1.0.102/24 +ip4_saddrsel:1 +ip6:new +ip6_addr:none +ip6_saddrsel:1 +ip_hostname:0 +jail_zfs:0 +jail_zfs_dataset:iocage/jails/test_102/data +jail_zfs_mountpoint:none +last_started:none +localhost_ip:none +login_flags:-f root +mac_prefix:3e4a92 +maxproc:off +memorylocked:off +memoryuse:off +min_dyn_devfs_ruleset:1000 +mount_devfs:1 +mount_fdescfs:1 +mount_linprocfs:0 +mount_procfs:0 +mountpoint:readonly +msgqqueued:off +msgqsize:off +nat:0 +nat_backend:ipfw +nat_forwards:none +nat_interface:none +nat_prefix:172.16 +nmsgq:off +notes:vmm=iocage_01 +nsem:off +nsemop:off +nshm:off +nthr:off +openfiles:off +origin:readonly +owner:root +pcpu:off +plugin_name:none +plugin_repository:none +priority:99 +pseudoterminals:off +quota:none +readbps:off +readiops:off +release:13.4-RELEASE-p2 +reservation:none +resolver:/etc/resolv.conf +rlimits:off +rtsold:0 +securelevel:2 +shmsize:off +stacksize:off +state:down +stop_timeout:30 +swapuse:off +sync_state:none +sync_target:none +sync_tgt_zpool:none +sysvmsg:new +sysvsem:new +sysvshm:new +template:0 +type:jail +used:readonly +vmemoryuse:off +vnet:1 +vnet0_mac:none +vnet0_mtu:auto +vnet1_mac:none +vnet1_mtu:auto +vnet2_mac:none +vnet2_mtu:auto +vnet3_mac:none +vnet3_mtu:auto +vnet_default_interface:auto +vnet_default_mtu:1500 +vnet_interfaces:none +wallclock:off +writebps:off +writeiops:off diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 
+SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt new file mode 100644 index 0000000000..0050a989f5 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt @@ -0,0 +1,141 @@ +CONFIG_VERSION:28 +allow_chflags:0 +allow_mlock:0 +allow_mount:0 +allow_mount_devfs:0 +allow_mount_fusefs:0 +allow_mount_nullfs:0 +allow_mount_procfs:0 +allow_mount_tmpfs:0 +allow_mount_zfs:0 +allow_quotas:0 +allow_raw_sockets:0 +allow_set_hostname:1 +allow_socket_af:0 +allow_sysvipc:0 +allow_tun:0 +allow_vmm:0 +assign_localhost:0 +available:readonly +basejail:1 +boot:0 +bpf:0 +children_max:0 +comment:none +compression:lz4 +compressratio:readonly +coredumpsize:off +count:1 +cpuset:off +cputime:off +datasize:off +dedup:off +defaultrouter:10.1.0.10 +defaultrouter6:auto +depends:none +devfs_ruleset:4 +dhcp:0 +enforce_statfs:2 +exec_clean:1 +exec_created:/usr/bin/true +exec_fib:0 +exec_jail_user:root +exec_poststart:/usr/bin/true +exec_poststop:/usr/bin/true +exec_prestart:/usr/bin/true +exec_prestop:/usr/bin/true +exec_start:/bin/sh /etc/rc +exec_stop:/bin/sh /etc/rc.shutdown +exec_system_jail_user:0 +exec_system_user:root +exec_timeout:60 +host_domainname:none +host_hostname:ansible-client +host_hostuuid:test_103 +host_time:1 +hostid:34333834-3734-5a43-3331-313342464631 +hostid_strict_check:0 +interfaces:vnet0:bridge0 +ip4:new +ip4_addr:vnet0|10.1.0.103/24 +ip4_saddrsel:1 +ip6:new +ip6_addr:none +ip6_saddrsel:1 +ip_hostname:0 +jail_zfs:0 +jail_zfs_dataset:iocage/jails/test_103/data +jail_zfs_mountpoint:none +last_started:none +localhost_ip:none +login_flags:-f root +mac_prefix:3e4a92 +maxproc:off +memorylocked:off +memoryuse:off +min_dyn_devfs_ruleset:1000 +mount_devfs:1 +mount_fdescfs:1 +mount_linprocfs:0 +mount_procfs:0 +mountpoint:readonly +msgqqueued:off +msgqsize:off +nat:0 
+nat_backend:ipfw +nat_forwards:none +nat_interface:none +nat_prefix:172.16 +nmsgq:off +notes:vmm=iocage_01 +nsem:off +nsemop:off +nshm:off +nthr:off +openfiles:off +origin:readonly +owner:root +pcpu:off +plugin_name:none +plugin_repository:none +priority:99 +pseudoterminals:off +quota:none +readbps:off +readiops:off +release:13.4-RELEASE-p2 +reservation:none +resolver:/etc/resolv.conf +rlimits:off +rtsold:0 +securelevel:2 +shmsize:off +stacksize:off +state:down +stop_timeout:30 +swapuse:off +sync_state:none +sync_target:none +sync_tgt_zpool:none +sysvmsg:new +sysvsem:new +sysvshm:new +template:0 +type:jail +used:readonly +vmemoryuse:off +vnet:1 +vnet0_mac:none +vnet0_mtu:auto +vnet1_mac:none +vnet1_mtu:auto +vnet2_mac:none +vnet2_mtu:auto +vnet3_mac:none +vnet3_mtu:auto +vnet_default_interface:auto +vnet_default_mtu:1500 +vnet_interfaces:none +wallclock:off +writebps:off +writeiops:off diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/test_iocage.py b/tests/unit/plugins/inventory/test_iocage.py new file mode 100644 index 0000000000..1a0aa22d16 --- /dev/null +++ b/tests/unit/plugins/inventory/test_iocage.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import yaml + +from 
ansible.inventory.data import InventoryData +from ansible.template import Templar +from ansible_collections.community.general.plugins.inventory.iocage import InventoryModule + + +@pytest.fixture +def inventory(): + inv = InventoryModule() + inv.inventory = InventoryData() + inv.templar = Templar(None) + inv.jails = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_jails.txt') + inv.js_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_jails.yml') + prpts_101 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt') + prpts_102 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt') + prpts_103 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt') + inv.prpts = {'test_101': prpts_101, 'test_102': prpts_102, 'test_103': prpts_103} + inv.ps_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_properties.yml') + inv.ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_inventory.yml') + return inv + + +def load_txt_data(path): + f = open(path, 'r') + s = f.read() + f.close() + return s + + +def load_yml_data(path): + f = open(path, 'r') + d = yaml.safe_load(f) + f.close() + return d + + +def get_option(option): + groups = {} + groups['test'] = "inventory_hostname.startswith('test')" + + if option == 'groups': + return groups + elif option == 'keyed_groups': + return [] + elif option == 'compose': + return {} + elif option == 'strict': + return False + else: + return None + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file('foobar.iocage.yml') is False + + +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.iocage.yml" + file.touch() + assert inventory.verify_file(str(file)) + + +def test_get_jails(inventory): + results = {'_meta': {'hostvars': {}}} + inventory.get_jails(inventory.jails, results) + assert results == inventory.js_ok + + +def test_get_properties(inventory): + results 
= {'_meta': {'hostvars': {}}} + inventory.get_jails(inventory.jails, results) + for hostname, host_vars in results['_meta']['hostvars'].items(): + inventory.get_properties(inventory.prpts[hostname], results, hostname) + assert results == inventory.ps_ok + + +def test_populate(inventory, mocker): + results = {'_meta': {'hostvars': {}}} + inventory.get_jails(inventory.jails, results) + for hostname, host_vars in results['_meta']['hostvars'].items(): + inventory.get_properties(inventory.prpts[hostname], results, hostname) + inventory.get_option = mocker.MagicMock(side_effect=get_option) + inventory.populate(results) + + # test + hosts = ('test_101', 'test_102', 'test_103') + vars = ('iocage_basejail', 'iocage_boot', 'iocage_ip4', 'iocage_ip6', 'iocage_properties', + 'iocage_release', 'iocage_state', 'iocage_template', 'iocage_type') + + # test host_vars + for host in hosts: + h = inventory.inventory.get_host(host) + for var in vars: + assert inventory.ok['all']['children']['test']['hosts'][host][var] == h.get_vars()[var] + + # test groups + test_101_info = inventory.inventory.get_host('test_101') + test_102_info = inventory.inventory.get_host('test_102') + test_103_info = inventory.inventory.get_host('test_103') + g = inventory.inventory.groups['test'] + assert g.hosts == [test_101_info, test_102_info, test_103_info] From 13e2097f378771d18df58b741d96020d56d5197c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 10:48:04 +1300 Subject: [PATCH 408/482] [pi ... prof]*: normalize docs (#9371) * [pi ... 
prof]*: normalize docs * Update plugins/modules/pkg5_publisher.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/pids.py | 21 +- plugins/modules/pingdom.py | 81 ++++---- plugins/modules/pip_package_info.py | 25 +-- plugins/modules/pipx.py | 138 +++++++------- plugins/modules/pipx_info.py | 35 ++-- plugins/modules/pkg5.py | 19 +- plugins/modules/pkg5_publisher.py | 19 +- plugins/modules/pkgin.py | 108 +++++------ plugins/modules/pkgng.py | 179 +++++++++--------- plugins/modules/pkgutil.py | 49 +++-- plugins/modules/pmem.py | 148 +++++++-------- plugins/modules/pnpm.py | 34 ++-- plugins/modules/portage.py | 82 ++++---- plugins/modules/portinstall.py | 59 +++--- plugins/modules/pritunl_org.py | 71 ++++--- plugins/modules/pritunl_org_info.py | 33 ++-- plugins/modules/pritunl_user.py | 154 +++++++-------- plugins/modules/pritunl_user_info.py | 59 +++--- plugins/modules/profitbricks.py | 40 ++-- plugins/modules/profitbricks_datacenter.py | 27 ++- plugins/modules/profitbricks_nic.py | 27 ++- plugins/modules/profitbricks_volume.py | 35 ++-- .../profitbricks_volume_attachments.py | 25 ++- 23 files changed, 702 insertions(+), 766 deletions(-) diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py index 99b52ef1dd..aea4d82d37 100644 --- a/plugins/modules/pids.py +++ b/plugins/modules/pids.py @@ -7,9 +7,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: pids -description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists." +description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in + that name exists." 
short_description: Retrieves process IDs list if the process is running otherwise return empty list author: - Saranya Sridharan (@saranyasridharan) @@ -35,13 +36,13 @@ options: type: bool default: false version_added: 3.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Pass the process name - name: Getting process IDs of the process community.general.pids: - name: python + name: python register: pids_of_python - name: Printing the process IDs obtained @@ -52,15 +53,15 @@ EXAMPLES = r''' community.general.pids: pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py register: myapp_pids -''' +""" -RETURN = ''' +RETURN = r""" pids: - description: Process IDs of the given process + description: Process IDs of the given process. returned: list of none, one, or more process IDs type: list - sample: [100,200] -''' + sample: [100, 200] +""" import abc import re diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py index bd4826a780..192dd244f2 100644 --- a/plugins/modules/pingdom.py +++ b/plugins/modules/pingdom.py @@ -8,56 +8,55 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pingdom short_description: Pause/unpause Pingdom alerts description: - - This module will let you pause/unpause Pingdom alerts + - This module will let you pause/unpause Pingdom alerts. 
author: - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" requirements: - - "This pingdom python library: https://github.com/mbabineau/pingdom-python" + - "This pingdom python library: U(https://github.com/mbabineau/pingdom-python)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Define whether or not the check should be running or paused. - required: true - choices: [ "running", "paused", "started", "stopped" ] - checkid: - type: str - description: - - Pingdom ID of the check. - required: true - uid: - type: str - description: - - Pingdom user ID. - required: true - passwd: - type: str - description: - - Pingdom user password. - required: true - key: - type: str - description: - - Pingdom API key. - required: true + state: + type: str + description: + - Define whether or not the check should be running or paused. + required: true + choices: ["running", "paused", "started", "stopped"] + checkid: + type: str + description: + - Pingdom ID of the check. + required: true + uid: + type: str + description: + - Pingdom user ID. + required: true + passwd: + type: str + description: + - Pingdom user password. + required: true + key: + type: str + description: + - Pingdom API key. + required: true notes: - - This module does not yet have support to add/remove checks. -''' + - This module does not yet have support to add/remove checks. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Pause the check with the ID of 12345 community.general.pingdom: uid: example@example.com @@ -73,7 +72,7 @@ EXAMPLES = ''' key: apipassword123 checkid: 12345 state: running -''' +""" import traceback diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py index f7354e3678..0bc08e7ce1 100644 --- a/plugins/modules/pip_package_info.py +++ b/plugins/modules/pip_package_info.py @@ -9,33 +9,33 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: pip_package_info short_description: Pip package information description: - - Return information about installed pip packages + - Return information about installed pip packages. extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module options: clients: description: - - A list of the pip executables that will be used to get the packages. - They can be supplied with the full path or just the executable name, for example V(pip3.7). + - A list of the pip executables that will be used to get the packages. They can be supplied with the full path or just the executable name, + for example V(pip3.7). default: ['pip'] required: false type: list elements: path requirements: - pip >= 20.3b1 (necessary for the C(--format) option) - - The requested pip executables must be installed on the target. + - The requested C(pip) executables must be installed on the target. author: - Matthew Jones (@matburt) - Brian Coca (@bcoca) - Adam Miller (@maxamillion) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Just get the list from default pip community.general.pip_package_info: @@ -46,16 +46,16 @@ EXAMPLES = ''' - name: Get from specific paths (virtualenvs?) 
community.general.pip_package_info: clients: '/home/me/projec42/python/pip3.5' -''' +""" -RETURN = ''' +RETURN = r""" packages: - description: a dictionary of installed package data + description: A dictionary of installed package data. returned: always type: dict contains: python: - description: A dictionary with each pip client which then contains a list of dicts with python package information + description: A dictionary with each pip client which then contains a list of dicts with python package information. returned: always type: dict sample: @@ -91,7 +91,8 @@ packages: ], }, } -''' +""" + import json import os diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index 1706f125d9..aa4309ce6c 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -9,16 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pipx short_description: Manages applications installed with pipx version_added: 3.8.0 description: -- Manage Python applications installed in isolated virtualenvs using pipx. + - Manage Python applications installed in isolated virtualenvs using pipx. extends_documentation_fragment: -- community.general.attributes -- community.general.pipx + - community.general.attributes + - community.general.pipx attributes: check_mode: support: full @@ -28,131 +27,128 @@ options: state: type: str choices: - - present - - absent - - install - - install_all - - uninstall - - uninstall_all - - inject - - uninject - - upgrade - - upgrade_shared - - upgrade_all - - reinstall - - reinstall_all - - latest - - pin - - unpin + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin default: install description: - - Desired state for the application. 
- - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. - - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general - 5.5.0. - - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have a - compatible version when using this option. These states have been added in community.general 9.4.0. + - Desired state for the application. + - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general + 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have + a compatible version when using this option. These states have been added in community.general 9.4.0. name: type: str description: - - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment where the application - will be installed. - - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed. - - Use O(source) for passing package specifications or installing from URLs or directories. + - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment where the application + will be installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed. + - Use O(source) for passing package specifications or installing from URLs or directories. source: type: str description: - - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. 
- - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. - - The value of this option is passed as-is to C(pipx). - - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. install_apps: description: - - Add apps from the injected packages. - - Only used when O(state=inject). + - Add apps from the injected packages. + - Only used when O(state=inject). type: bool default: false version_added: 6.5.0 install_deps: description: - - Include applications of dependent packages. - - Only used when O(state=install), O(state=latest), or O(state=inject). + - Include applications of dependent packages. + - Only used when O(state=install), O(state=latest), or O(state=inject). type: bool default: false inject_packages: description: - - Packages to be injected into an existing virtual environment. - - Only used when O(state=inject). + - Packages to be injected into an existing virtual environment. + - Only used when O(state=inject). type: list elements: str force: description: - - Force modification of the application's virtual environment. See C(pipx) for details. - - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + - Force modification of the application's virtual environment. See C(pipx) for details. + - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). 
type: bool default: false include_injected: description: - - Upgrade the injected packages along with the application. - - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). - - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. + - Upgrade the injected packages along with the application. + - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). + - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. type: bool default: false index_url: description: - - Base URL of Python Package Index. - - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). + - Base URL of Python Package Index. + - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). type: str python: description: - - Python version to be used when creating the application virtual environment. Must be 3.6+. - - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). + - Python version to be used when creating the application virtual environment. Must be 3.6+. + - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). type: str system_site_packages: description: - - Give application virtual environment access to the system site-packages directory. - - Only used when O(state=install) or O(state=latest). + - Give application virtual environment access to the system site-packages directory. + - Only used when O(state=install) or O(state=latest). type: bool default: false version_added: 6.6.0 editable: description: - - Install the project in editable mode. + - Install the project in editable mode. type: bool default: false version_added: 4.6.0 pip_args: description: - - Arbitrary arguments to pass directly to C(pip). + - Arbitrary arguments to pass directly to C(pip). 
type: str version_added: 4.6.0 suffix: description: - - Optional suffix for virtual environment and executable names. - - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change." + - Optional suffix for virtual environment and executable names. + - B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change. type: str version_added: 9.3.0 global: version_added: 9.4.0 spec_metadata: description: - - Spec metadata file for O(state=install_all). - - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with - O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + - Spec metadata file for O(state=install_all). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with + O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). type: path version_added: 9.4.0 notes: -- > - This first implementation does not verify whether a specified version constraint has been installed or not. - Hence, when using version operators, C(pipx) module will always try to execute the operation, - even when the application was previously installed. - This feature will be added in the future. + - This first implementation does not verify whether a specified version constraint has been installed or not. Hence, when using version operators, + C(pipx) module will always try to execute the operation, even when the application was previously installed. This feature will be added in + the future. 
author: -- "Alexei Znamensky (@russoz)" + - "Alexei Znamensky (@russoz)" """ -EXAMPLES = """ ---- +EXAMPLES = r""" - name: Install tox community.general.pipx: name: tox @@ -181,16 +177,16 @@ EXAMPLES = """ - name: Install multiple packages from list vars: pipx_packages: - - pycowsay - - black - - tox + - pycowsay + - black + - tox community.general.pipx: name: "{{ item }}" state: latest with_items: "{{ pipx_packages }}" """ -RETURN = """ +RETURN = r""" version: description: Version of pipx. type: str diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 24a6739024..91d2fdb21c 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -9,46 +9,44 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pipx_info short_description: Rretrieves information about applications installed with pipx version_added: 5.6.0 description: -- Retrieve details about Python applications installed in isolated virtualenvs using pipx. + - Retrieve details about Python applications installed in isolated virtualenvs using pipx. extends_documentation_fragment: -- community.general.attributes -- community.general.attributes.info_module -- community.general.pipx + - community.general.attributes + - community.general.attributes.info_module + - community.general.pipx options: name: description: - - Name of an application installed with C(pipx). + - Name of an application installed with C(pipx). type: str include_deps: description: - - Include dependent packages in the output. + - Include dependent packages in the output. type: bool default: false include_injected: description: - - Include injected packages in the output. + - Include injected packages in the output. type: bool default: false include_raw: description: - - Returns the raw output of C(pipx list --json). - - The raw output is not affected by O(include_deps) or O(include_injected). 
+ - Returns the raw output of C(pipx list --json). + - The raw output is not affected by O(include_deps) or O(include_injected). type: bool default: false global: version_added: 9.3.0 author: -- "Alexei Znamensky (@russoz)" + - "Alexei Znamensky (@russoz)" """ -EXAMPLES = """ ---- +EXAMPLES = r""" - name: retrieve all installed applications community.general.pipx_info: {} @@ -68,10 +66,9 @@ EXAMPLES = """ include_deps: true """ -RETURN = """ ---- +RETURN = r""" application: - description: The list of installed applications + description: The list of installed applications. returned: success type: list elements: dict @@ -100,8 +97,8 @@ application: licenses: "0.6.1" pinned: description: - - Whether the installed application is pinned or not. - - When using C(pipx<=1.6.0), this returns C(null). + - Whether the installed application is pinned or not. + - When using C(pipx<=1.6.0), this returns C(null). returned: success type: bool sample: diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py index 08fa9272f7..34e86c3774 100644 --- a/plugins/modules/pkg5.py +++ b/plugins/modules/pkg5.py @@ -8,11 +8,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkg5 author: -- Peter Oliver (@mavit) + - Peter Oliver (@mavit) short_description: Manages packages with the Solaris 11 Image Packaging System description: - IPS packages are the native packages in Solaris 11 and higher. @@ -36,7 +35,7 @@ options: state: description: - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. - choices: [ absent, latest, present, installed, removed, uninstalled ] + choices: [absent, latest, present, installed, removed, uninstalled] default: present type: str accept_licenses: @@ -44,7 +43,7 @@ options: - Accept any licences. 
type: bool default: false - aliases: [ accept, accept_licences ] + aliases: [accept, accept_licences] be_name: description: - Creates a new boot environment with the given name. @@ -60,8 +59,8 @@ options: type: bool default: false version_added: 9.0.0 -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Install Vim community.general.pkg5: name: editor/vim @@ -79,9 +78,9 @@ EXAMPLES = ''' - name: Install several packages at once community.general.pkg5: name: - - /file/gnu-findutils - - /text/gnu-grep -''' + - /file/gnu-findutils + - /text/gnu-grep +""" import re diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py index 6d07e455f4..01c9d48cce 100644 --- a/plugins/modules/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -10,15 +10,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkg5_publisher author: "Peter Oliver (@mavit)" short_description: Manages Solaris 11 Image Packaging System publishers description: - IPS packages are the native packages in Solaris 11 and higher. - - This modules will configure which publishers a client will download IPS - packages from. + - This modules will configure which publishers a client will download IPS packages from. extends_documentation_fragment: - community.general.attributes attributes: @@ -31,18 +29,17 @@ options: description: - The publisher's name. required: true - aliases: [ publisher ] + aliases: [publisher] type: str state: description: - Whether to ensure that a publisher is present or absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str sticky: description: - - Packages installed from a sticky repository can only receive updates - from that repository. + - Packages installed from a sticky repository can only receive updates from that repository. type: bool enabled: description: @@ -60,8 +57,8 @@ options: - Multiple values may be provided. 
type: list elements: str -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Fetch packages for the solaris publisher direct from Oracle community.general.pkg5_publisher: name: solaris @@ -72,7 +69,7 @@ EXAMPLES = ''' community.general.pkg5_publisher: name: site origin: 'https://pkg.example.com/site/' -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py index 8b29655d37..76423f8135 100644 --- a/plugins/modules/pkgin.py +++ b/plugins/modules/pkgin.py @@ -16,70 +16,66 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkgin short_description: Package manager for SmartOS, NetBSD, et al description: - - "The standard package manager for SmartOS, but also usable on NetBSD - or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" + - 'The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/)).' author: - - "Larry Gilbert (@L2G)" - - "Shaun Zinck (@szinck)" - - "Jasper Lievisse Adriaanse (@jasperla)" + - "Larry Gilbert (@L2G)" + - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" notes: - - "Known bug with pkgin < 0.8.0: if a package is removed and another - package depends on it, the other package will be silently removed as - well." + - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as well.' 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of package to install/remove; - - multiple names may be given, separated by commas - aliases: [pkg] - type: list - elements: str - state: - description: - - Intended state of the package - choices: [ 'present', 'absent' ] - default: present - type: str - update_cache: - description: - - Update repository database. Can be run with other steps or on it's own. - type: bool - default: false - upgrade: - description: - - Upgrade main packages to their newer versions - type: bool - default: false - full_upgrade: - description: - - Upgrade all packages to their newer versions - type: bool - default: false - clean: - description: - - Clean packages cache - type: bool - default: false - force: - description: - - Force package reinstall - type: bool - default: false -''' + name: + description: + - Name of package to install/remove; + - Multiple names may be given, separated by commas. + aliases: [pkg] + type: list + elements: str + state: + description: + - Intended state of the package. + choices: ['present', 'absent'] + default: present + type: str + update_cache: + description: + - Update repository database. Can be run with other steps or on it's own. + type: bool + default: false + upgrade: + description: + - Upgrade main packages to their newer versions. + type: bool + default: false + full_upgrade: + description: + - Upgrade all packages to their newer versions. + type: bool + default: false + clean: + description: + - Clean packages cache. + type: bool + default: false + force: + description: + - Force package reinstall. 
+ type: bool + default: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.pkgin: name: foo @@ -125,7 +121,7 @@ EXAMPLES = ''' - name: Clean packages cache (equivalent to pkgin clean) community.general.pkgin: clean: true -''' +""" import re diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py index 7a04ee3a6e..ae333b492b 100644 --- a/plugins/modules/pkgng.py +++ b/plugins/modules/pkgng.py @@ -14,107 +14,100 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkgng short_description: Package manager for FreeBSD >= 9.0 description: - - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. + - Manage binary packages for FreeBSD using C(pkgng) which is available in versions after 9.0. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name or list of names of packages to install/remove. - - "With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops." - required: true - aliases: [pkg] - type: list - elements: str - state: - description: - - State of the package. - choices: [ 'present', 'latest', 'absent' ] - required: false - default: present - type: str - cached: - description: - - Use local package base instead of fetching an updated one. - type: bool - required: false - default: false - annotation: - description: - - A list of keyvalue-pairs of the form - C(<+/-/:>[=]). A V(+) denotes adding an annotation, a - V(-) denotes removing an annotation, and V(:) denotes modifying an - annotation. - If setting or modifying annotations, a value must be provided. 
- required: false - type: list - elements: str - pkgsite: - description: - - For pkgng versions before 1.1.4, specify packagesite to use - for downloading packages. If not specified, use settings from - C(/usr/local/etc/pkg.conf). - - For newer pkgng versions, specify a the name of a repository - configured in C(/usr/local/etc/pkg/repos). - required: false - type: str - rootdir: - description: - - For pkgng versions 1.5 and later, pkg will install all packages - within the specified root directory. - - Can not be used together with O(chroot) or O(jail) options. - required: false - type: path - chroot: - description: - - Pkg will chroot in the specified environment. - - Can not be used together with O(rootdir) or O(jail) options. - required: false - type: path - jail: - description: - - Pkg will execute in the given jail name or id. - - Can not be used together with O(chroot) or O(rootdir) options. - type: str - autoremove: - description: - - Remove automatically installed packages which are no longer needed. - required: false - type: bool - default: false - ignore_osver: - description: - - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches. - - Defines the E(IGNORE_OSVERSION) environment variable. - required: false - type: bool - default: false - version_added: 1.3.0 - use_globs: - description: - - Treat the package names as shell glob patterns. - required: false - type: bool - default: true - version_added: 9.3.0 + name: + description: + - Name or list of names of packages to install/remove. + - With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops. + required: true + aliases: [pkg] + type: list + elements: str + state: + description: + - State of the package. + choices: ['present', 'latest', 'absent'] + required: false + default: present + type: str + cached: + description: + - Use local package base instead of fetching an updated one. 
+ type: bool
+ required: false
+ default: false
+ annotation:
+ description:
+ - A list of keyvalue-pairs of the form C(<+/-/:>[=]). A V(+) denotes adding an annotation, a V(-) denotes removing an annotation,
+ and V(:) denotes modifying an annotation. If setting or modifying annotations, a value must be provided.
+ required: false
+ type: list
+ elements: str
+ pkgsite:
+ description:
+ - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages.
+ If not specified, use settings from C(/usr/local/etc/pkg.conf).
+ - For newer C(pkgng) versions, specify the name of a repository configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For C(pkgng) versions 1.5 and later, pkg will install all packages within the specified root directory.
+ - Can not be used together with O(chroot) or O(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with O(rootdir) or O(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with O(chroot) or O(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: false
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on C(-STABLE) and C(-CURRENT) branches.
+ - Defines the E(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: false
+ version_added: 1.3.0
+ use_globs:
+ description:
+ - Treat the package names as shell glob patterns.
+ required: false
+ type: bool
+ default: true
+ version_added: 9.3.0
 author: "bleader (@bleader)"
 notes:
- - When using pkgsite, be careful that already in cache packages won't be downloaded again.
- - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. -''' + - When using pkgsite, be careful that already in cache packages will not be downloaded again. + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) + option. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.pkgng: name: foo @@ -149,7 +142,7 @@ EXAMPLES = ''' name: foo/bar state: latest use_globs: false -''' +""" from collections import defaultdict diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py index 15f98a9d49..ce85308651 100644 --- a/plugins/modules/pkgutil.py +++ b/plugins/modules/pkgutil.py @@ -12,19 +12,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pkgutil short_description: OpenCSW package management on Solaris description: -- This module installs, updates and removes packages from the OpenCSW project for Solaris. -- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies. -- See U(https://www.opencsw.org/) for more information about the project. + - This module installs, updates and removes packages from the OpenCSW project for Solaris. + - Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies. + - See U(https://www.opencsw.org/) for more information about the project. author: -- Alexander Winkler (@dermute) -- David Ponessa (@scathatheworm) + - Alexander Winkler (@dermute) + - David Ponessa (@scathatheworm) extends_documentation_fragment: -- community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -35,40 +34,40 @@ attributes: options: name: description: - - The name of the package. 
- - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil. + - The name of the package. + - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil. type: list required: true elements: str - aliases: [ pkg ] + aliases: [pkg] site: description: - - The repository path to install the package from. - - Its global definition is in C(/etc/opt/csw/pkgutil.conf). + - The repository path to install the package from. + - Its global definition is in C(/etc/opt/csw/pkgutil.conf). required: false type: str state: description: - - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages. - - The upgrade (V(latest)) operation will update/install the packages to the latest version available. + - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages. + - The upgrade (V(latest)) operation will update/install the packages to the latest version available. type: str required: true - choices: [ absent, installed, latest, present, removed ] + choices: [absent, installed, latest, present, removed] update_catalog: description: - - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to V(true). + - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to V(true). type: bool default: false force: description: - - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true). - - This is useful for rolling back to stable from testing, or similar operations. + - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true). + - This is useful for rolling back to stable from testing, or similar operations. 
type: bool default: false version_added: 1.2.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install a package community.general.pkgutil: name: CSWcommon @@ -88,8 +87,8 @@ EXAMPLES = r''' - name: Install several packages community.general.pkgutil: name: - - CSWsudo - - CSWtop + - CSWsudo + - CSWtop state: present - name: Update all packages @@ -102,9 +101,9 @@ EXAMPLES = r''' name: '*' state: latest force: true -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py index 4d10c448e1..4e6edfeb06 100644 --- a/plugins/modules/pmem.py +++ b/plugins/modules/pmem.py @@ -7,21 +7,19 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Masayoshi Mizuma (@mizumm) + - Masayoshi Mizuma (@mizumm) module: pmem short_description: Configure Intel Optane Persistent Memory modules version_added: 4.5.0 description: - - This module allows Configuring Intel Optane Persistent Memory modules - (PMem) using ipmctl and ndctl command line tools. + - This module allows Configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line tools. requirements: - - ipmctl and ndctl command line tools - - xmltodict + - C(ipmctl) and C(ndctl) command line tools + - xmltodict extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: none @@ -30,33 +28,31 @@ attributes: options: appdirect: description: - - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)). - - Create AppDirect capacity utilizing hardware interleaving across the - requested PMem modules if applicable given the specified target. - - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) + - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)). 
+ - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the specified target. + - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100). type: int appdirect_interleaved: description: - - Create AppDirect capacity that is interleaved any other PMem modules. + - Create AppDirect capacity that is interleaved any other PMem modules. type: bool required: false default: true memorymode: description: - - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)). + - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)). type: int reserved: description: - - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped - into the system physical address space and will be presented as reserved - capacity with Show Device and Show Memory Resources Commands. - - O(reserved) will be set automatically if this is not configured. + - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped into the system physical address space and will be + presented as reserved capacity with Show Device and Show Memory Resources Commands. + - O(reserved) will be set automatically if this is not configured. type: int required: false socket: description: - - This enables to set the configuration for each socket by using the socket ID. - - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket. + - This enables to set the configuration for each socket by using the socket ID. + - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket. type: list elements: dict suboptions: @@ -66,18 +62,18 @@ options: required: true appdirect: description: - - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID. + - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID. 
type: int required: true appdirect_interleaved: description: - - Create AppDirect capacity that is interleaved any other PMem modules within the socket ID. + - Create AppDirect capacity that is interleaved any other PMem modules within the socket ID. type: bool required: false default: true memorymode: description: - - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID. + - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID. type: int required: true reserved: @@ -86,86 +82,86 @@ options: type: int namespace: description: - - This enables to set the configuration for the namespace of the PMem. + - This enables to set the configuration for the namespace of the PMem. type: list elements: dict suboptions: mode: description: - - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace. + - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace. type: str required: true choices: ['raw', 'sector', 'fsdax', 'devdax'] type: description: - - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace. + - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace. type: str required: false choices: ['pmem', 'blk'] size: description: - - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, - V(m) or V(M) or V(MB) for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB. + - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB) for MiB, V(g) or V(G) + or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB. - This option is required if multiple namespaces are configured. - If this option is not set, all of the available space of a region is configured. type: str required: false namespace_append: description: - - Enable to append the new namespaces to the system. 
- - The default is V(false) so the all existing namespaces not listed in O(namespace) are removed. + - Enable to append the new namespaces to the system. + - The default is V(false) so the all existing namespaces not listed in O(namespace) are removed. type: bool default: false required: false -''' +""" -RETURN = r''' +RETURN = r""" reboot_required: - description: Indicates that the system reboot is required to complete the PMem configuration. - returned: success - type: bool - sample: true + description: Indicates that the system reboot is required to complete the PMem configuration. + returned: success + type: bool + sample: true result: - description: - - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. - - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. - - If O(namespace) argument is provided, shows the detail of each namespace. - returned: success - type: list - elements: dict - contains: - appdirect: - description: AppDirect size in bytes. - type: int - memorymode: - description: Memory Mode size in bytes. - type: int - reserved: - description: Reserved size in bytes. - type: int - socket: - description: The socket ID to be configured. - type: int - namespace: - description: The list of the detail of namespace. - type: list - sample: [ - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 0 - }, - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 1 - } - ] -''' + description: + - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. + - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. + - If O(namespace) argument is provided, shows the detail of each namespace. + returned: success + type: list + elements: dict + contains: + appdirect: + description: AppDirect size in bytes. 
+ type: int + memorymode: + description: Memory Mode size in bytes. + type: int + reserved: + description: Reserved size in bytes. + type: int + socket: + description: The socket ID to be configured. + type: int + namespace: + description: The list of the detail of namespace. + type: list + sample: [ + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 0 + }, + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 1 + } + ] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure the Pmem as AppDirect 10, Memory Mode 70, and the Reserved 20 percent. community.general.pmem: appdirect: 10 @@ -205,7 +201,7 @@ EXAMPLES = r''' - size: 320MB type: pmem mode: sector -''' +""" import json import re diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py index 315b07ba8e..c4dbf55dff 100644 --- a/plugins/modules/pnpm.py +++ b/plugins/modules/pnpm.py @@ -12,13 +12,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pnpm -short_description: Manage node.js packages with pnpm +short_description: Manage Node.js packages with C(pnpm) version_added: 7.4.0 description: - - Manage node.js packages with the L(pnpm package manager, https://pnpm.io/). + - Manage Node.js packages with the L(pnpm package manager, https://pnpm.io/). author: - "Aritra Sen (@aretrosen)" - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module" @@ -32,18 +31,18 @@ attributes: options: name: description: - - The name of a node.js library to install. - - All packages in package.json are installed if not provided. + - The name of a Node.js library to install. + - All packages in C(package.json) are installed if not provided. type: str required: false alias: description: - - Alias of the node.js library. + - Alias of the Node.js library. 
type: str required: false path: description: - - The base path to install the node.js libraries. + - The base path to install the Node.js libraries. type: path required: false version: @@ -53,7 +52,7 @@ options: required: false global: description: - - Install the node.js library globally. + - Install the Node.js library globally. required: false default: false type: bool @@ -97,7 +96,7 @@ options: type: bool state: description: - - Installation state of the named node.js library. + - Installation state of the named Node.js library. - If V(absent) is selected, a name option must be provided. type: str required: false @@ -107,36 +106,36 @@ requirements: - Pnpm executable present in E(PATH). """ -EXAMPLES = """ -- name: Install "tailwindcss" node.js package. +EXAMPLES = r""" +- name: Install "tailwindcss" Node.js package. community.general.pnpm: name: tailwindcss path: /app/location -- name: Install "tailwindcss" node.js package on version 3.3.2 +- name: Install "tailwindcss" Node.js package on version 3.3.2 community.general.pnpm: name: tailwindcss version: 3.3.2 path: /app/location -- name: Install "tailwindcss" node.js package globally. +- name: Install "tailwindcss" Node.js package globally. community.general.pnpm: name: tailwindcss global: true -- name: Install "tailwindcss" node.js package as dev dependency. +- name: Install "tailwindcss" Node.js package as dev dependency. community.general.pnpm: name: tailwindcss path: /app/location dev: true -- name: Install "tailwindcss" node.js package as optional dependency. +- name: Install "tailwindcss" Node.js package as optional dependency. 
community.general.pnpm: name: tailwindcss path: /app/location optional: true -- name: Install "tailwindcss" node.js package version 0.1.3 as tailwind-1 +- name: Install "tailwindcss" Node.js package version 0.1.3 as tailwind-1 community.general.pnpm: name: tailwindcss alias: tailwind-1 @@ -158,6 +157,7 @@ EXAMPLES = """ path: /app/location state: latest """ + import json import os diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py index 8ae8efb087..f0b7220836 100644 --- a/plugins/modules/portage.py +++ b/plugins/modules/portage.py @@ -14,13 +14,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: portage short_description: Package manager for Gentoo description: - - Manages Gentoo packages - + - Manages Gentoo packages. extends_documentation_fragment: - community.general.attributes @@ -33,21 +31,21 @@ attributes: options: package: description: - - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world) + - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world). aliases: [name] type: list elements: str state: description: - - State of the package atom + - State of the package atom. default: "present" - choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] + choices: ["present", "installed", "emerged", "absent", "removed", "unmerged", "latest"] type: str update: description: - - Update packages to the best version available (--update) + - Update packages to the best version available (C(--update)). type: bool default: false @@ -59,82 +57,81 @@ options: deep: description: - - Consider the entire dependency tree of packages (--deep) + - Consider the entire dependency tree of packages (C(--deep)). 
type: bool
 default: false

 newuse:
 description:
- - Include installed packages where USE flags have changed (--newuse)
+ - Include installed packages where USE flags have changed (C(--newuse)).
 type: bool
 default: false

 changed_use:
 description:
- - Include installed packages where USE flags have changed, except when
- - flags that the user has not enabled are added or removed
- - (--changed-use)
+ - Include installed packages where USE flags have changed, except when
+ flags that the user has not enabled are added or removed
+ (C(--changed-use)).
 type: bool
 default: false

 oneshot:
 description:
- - Do not add the packages to the world file (--oneshot)
+ - Do not add the packages to the world file (C(--oneshot)).
 type: bool
 default: false

 noreplace:
 description:
- - Do not re-emerge installed packages (--noreplace)
+ - Do not re-emerge installed packages (C(--noreplace)).
 type: bool
 default: true

 nodeps:
 description:
- - Only merge packages but not their dependencies (--nodeps)
+ - Only merge packages but not their dependencies (C(--nodeps)).
 type: bool
 default: false

 onlydeps:
 description:
- - Only merge packages' dependencies but not the packages (--onlydeps)
+ - Only merge packages' dependencies but not the packages (C(--onlydeps)).
 type: bool
 default: false

 depclean:
 description:
- - Remove packages not needed by explicitly merged packages (--depclean)
- - If no package is specified, clean up the world's dependencies
- - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ - Remove packages not needed by explicitly merged packages (C(--depclean)).
+ - If no package is specified, clean up the world's dependencies.
+ - Otherwise, C(--depclean) serves as a dependency aware version of C(--unmerge).
 type: bool
 default: false

 quiet:
 description:
- - Run emerge in quiet mode (--quiet)
+ - Run emerge in quiet mode (C(--quiet)).
type: bool
 default: false

 verbose:
 description:
- - Run emerge in verbose mode (--verbose)
+ - Run emerge in verbose mode (C(--verbose)).
 type: bool
 default: false

 select:
 description:
 - If set to V(true), explicitely add the package to the world file.
- - Please note that this option is not used for idempotency, it is only used
- when actually installing a package.
+ - Please note that this option is not used for idempotency, it is only used when actually installing a package.
 type: bool
 version_added: 8.6.0

 sync:
 description:
- - Sync package repositories first
- - If V(yes), perform "emerge --sync"
- - If V(web), perform "emerge-webrsync"
- choices: [ "web", "yes", "no" ]
+ - Sync package repositories first.
+ - If V(yes), perform C(emerge --sync).
+ - If V(web), perform C(emerge-webrsync).
+ choices: ["web", "yes", "no"]
 type: str

 getbinpkgonly:
@@ -171,16 +168,13 @@ options:
 jobs:
 description:
 - Specifies the number of packages to build simultaneously.
- - "Since version 2.6: Value of 0 or False resets any previously added"
- - --jobs setting values
+ - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--jobs) setting values.'
 type: int

 loadavg:
 description:
- - Specifies that no new builds should be started if there are
- - other builds running and the load average is at least LOAD
- - "Since version 2.6: Value of 0 or False resets any previously added"
- - --load-average setting values
+ - Specifies that no new builds should be started if there are other builds running and the load average is at least LOAD.
+ - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--load-average) setting values.'
 type: float

 withbdeps:
@@ -191,26 +185,24 @@ options:
 quietbuild:
 description:
- - Redirect all build output to logs alone, and do not display it
- - on stdout (--quiet-build)
+ - Redirect all build output to logs alone, and do not display it on stdout (C(--quiet-build)).
type: bool default: false quietfail: description: - - Suppresses display of the build log on stdout (--quiet-fail) - - Only the die message and the path of the build log will be - - displayed on stdout. + - Suppresses display of the build log on stdout (--quiet-fail). + - Only the die message and the path of the build log will be displayed on stdout. type: bool default: false author: - - "William L Thomson Jr (@wltjr)" - - "Yap Sok Ann (@sayap)" - - "Andrew Udvare (@Tatsh)" -''' + - "William L Thomson Jr (@wltjr)" + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare (@Tatsh)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure package foo is installed community.general.portage: package: foo @@ -252,7 +244,7 @@ EXAMPLES = ''' package: foo state: absent depclean: true -''' +""" import os import re diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py index 59dafb1eb8..d4e1591d32 100644 --- a/plugins/modules/portinstall.py +++ b/plugins/modules/portinstall.py @@ -12,43 +12,42 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: portinstall short_description: Installing packages from FreeBSD's ports system description: - - Manage packages for FreeBSD using 'portinstall'. + - Manage packages for FreeBSD using C(portinstall). 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - name of package to install/remove - aliases: [pkg] - required: true - type: str - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - type: str - use_packages: - description: - - use packages instead of ports whenever available - type: bool - required: false - default: true + name: + description: + - Name of package to install/remove. + aliases: [pkg] + required: true + type: str + state: + description: + - State of the package. + choices: ['present', 'absent'] + required: false + default: present + type: str + use_packages: + description: + - Use packages instead of ports whenever available. + type: bool + required: false + default: true author: "berenddeboer (@berenddeboer)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.portinstall: name: foo @@ -63,7 +62,7 @@ EXAMPLES = ''' community.general.portinstall: name: foo,bar state: absent -''' +""" import re diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py index 4945a8fc20..d97f9567cd 100644 --- a/plugins/modules/pritunl_org.py +++ b/plugins/modules/pritunl_org.py @@ -8,54 +8,47 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_org author: Florian Dambrine (@Lowess) version_added: 2.5.0 short_description: Manages Pritunl Organizations using the Pritunl API description: - - A module to manage Pritunl organizations using the Pritunl API. + - A module to manage Pritunl organizations using the Pritunl API. 
extends_documentation_fragment: - - community.general.pritunl - - community.general.attributes + - community.general.pritunl + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - required: true - aliases: - - org - description: - - The name of the organization to manage in Pritunl. - - force: - type: bool - default: false - description: - - If O(force) is V(true) and O(state) is V(absent), the module - will delete the organization, no matter if it contains users - or not. By default O(force) is V(false), which will cause the - module to fail the deletion of the organization when it contains - users. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If V(present), the module adds organization O(name) to - Pritunl. If V(absent), attempt to delete the organization - from Pritunl (please read about O(force) usage). + name: + type: str + required: true + aliases: + - org + description: + - The name of the organization to manage in Pritunl. + force: + type: bool + default: false + description: + - If O(force) is V(true) and O(state) is V(absent), the module will delete the organization, no matter if it contains users or not. By default + O(force) is V(false), which will cause the module to fail the deletion of the organization when it contains users. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from Pritunl (please + read about O(force) usage). """ -EXAMPLES = """ +EXAMPLES = r""" - name: Ensure the organization named MyOrg exists community.general.pritunl_org: state: present @@ -67,7 +60,7 @@ EXAMPLES = """ name: MyOrg """ -RETURN = """ +RETURN = r""" response: description: JSON representation of a Pritunl Organization. 
returned: success diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py index 979e29b5a0..dc198bc9cc 100644 --- a/plugins/modules/pritunl_org_info.py +++ b/plugins/modules/pritunl_org_info.py @@ -8,32 +8,29 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_org_info author: Florian Dambrine (@Lowess) version_added: 2.5.0 short_description: List Pritunl Organizations using the Pritunl API description: - - A module to list Pritunl organizations using the Pritunl API. + - A module to list Pritunl organizations using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl - - community.general.attributes - - community.general.attributes.info_module + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module options: - organization: - type: str - required: false - aliases: - - org - default: null - description: - - Name of the Pritunl organization to search for. - If none provided, the module will return all Pritunl - organizations. + organization: + type: str + required: false + aliases: + - org + default: null + description: + - Name of the Pritunl organization to search for. If none provided, the module will return all Pritunl organizations. """ -EXAMPLES = """ +EXAMPLES = r""" - name: List all existing Pritunl organizations community.general.pritunl_org_info: @@ -42,7 +39,7 @@ EXAMPLES = """ organization: MyOrg """ -RETURN = """ +RETURN = r""" organizations: description: List of Pritunl organizations. 
returned: success diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py index bdbc335d90..932c4c1322 100644 --- a/plugins/modules/pritunl_user.py +++ b/plugins/modules/pritunl_user.py @@ -8,97 +8,87 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_user author: "Florian Dambrine (@Lowess)" version_added: 2.3.0 short_description: Manage Pritunl Users using the Pritunl API description: - - A module to manage Pritunl users using the Pritunl API. + - A module to manage Pritunl users using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl - - community.general.attributes + - community.general.pritunl + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - organization: - type: str - required: true - aliases: - - org - description: - - The name of the organization the user is part of. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If V(present), the module adds user O(user_name) to - the Pritunl O(organization). If V(absent), removes the user - O(user_name) from the Pritunl O(organization). - - user_name: - type: str - required: true - default: null - description: - - Name of the user to create or delete from Pritunl. - - user_email: - type: str - required: false - default: null - description: - - Email address associated with the user O(user_name). - - user_type: - type: str - required: false - default: client - choices: - - client - - server - description: - - Type of the user O(user_name). - - user_groups: - type: list - elements: str - required: false - default: null - description: - - List of groups associated with the user O(user_name). 
- - user_disabled: - type: bool - required: false - default: null - description: - - Enable/Disable the user O(user_name). - - user_gravatar: - type: bool - required: false - default: null - description: - - Enable/Disable Gravatar usage for the user O(user_name). - - user_mac_addresses: - type: list - elements: str - description: - - Allowed MAC addresses for the user O(user_name). - version_added: 5.0.0 + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds user O(user_name) to the Pritunl O(organization). If V(absent), removes the user O(user_name) from the + Pritunl O(organization). + user_name: + type: str + required: true + default: + description: + - Name of the user to create or delete from Pritunl. + user_email: + type: str + required: false + default: + description: + - Email address associated with the user O(user_name). + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user O(user_name). + user_groups: + type: list + elements: str + required: false + default: + description: + - List of groups associated with the user O(user_name). + user_disabled: + type: bool + required: false + default: + description: + - Enable/Disable the user O(user_name). + user_gravatar: + type: bool + required: false + default: + description: + - Enable/Disable Gravatar usage for the user O(user_name). + user_mac_addresses: + type: list + elements: str + description: + - Allowed MAC addresses for the user O(user_name). 
+ version_added: 5.0.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: Create the user Foo with email address foo@bar.com in MyOrg community.general.pritunl_user: state: present @@ -123,7 +113,7 @@ EXAMPLES = """ user_name: Foo """ -RETURN = """ +RETURN = r""" response: description: JSON representation of Pritunl Users. returned: success diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py index 3f8f62003f..02d8512315 100644 --- a/plugins/modules/pritunl_user_info.py +++ b/plugins/modules/pritunl_user_info.py @@ -8,45 +8,42 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_user_info author: "Florian Dambrine (@Lowess)" version_added: 2.3.0 short_description: List Pritunl Users using the Pritunl API description: - - A module to list Pritunl users using the Pritunl API. + - A module to list Pritunl users using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl - - community.general.attributes - - community.general.attributes.info_module + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module options: - organization: - type: str - required: true - aliases: - - org - description: - - The name of the organization the user is part of. - - user_name: - type: str - required: false - description: - - Name of the user to filter on Pritunl. - - user_type: - type: str - required: false - default: client - choices: - - client - - server - description: - - Type of the user O(user_name). + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + user_name: + type: str + required: false + description: + - Name of the user to filter on Pritunl. + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user O(user_name). 
""" -EXAMPLES = """ +EXAMPLES = r""" - name: List all existing users part of the organization MyOrg community.general.pritunl_user_info: state: list @@ -59,7 +56,7 @@ EXAMPLES = """ user_name: Florian """ -RETURN = """ +RETURN = r""" users: description: List of Pritunl users. returned: success diff --git a/plugins/modules/profitbricks.py b/plugins/modules/profitbricks.py index 875bd78c4e..e912db6985 100644 --- a/plugins/modules/profitbricks.py +++ b/plugins/modules/profitbricks.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: profitbricks short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine description: - - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait - for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0 + - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait + for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0. extends_documentation_fragment: - community.general.attributes attributes: @@ -34,7 +33,7 @@ options: type: str image: description: - - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. + - The system image ID for creating the virtual machine, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8). type: str image_password: description: @@ -65,7 +64,7 @@ options: - The CPU family type to allocate to the virtual machine. type: str default: AMD_OPTERON - choices: [ "AMD_OPTERON", "INTEL_XEON" ] + choices: ["AMD_OPTERON", "INTEL_XEON"] volume_size: description: - The size in GB of the boot volume. @@ -76,10 +75,10 @@ options: - The bus type for the volume. 
type: str default: VIRTIO - choices: [ "IDE", "VIRTIO"] + choices: ["IDE", "VIRTIO"] instance_ids: description: - - list of instance ids, currently only used when state='absent' to remove instances. + - List of instance ids, currently only used when state='absent' to remove instances. type: list elements: str default: [] @@ -93,7 +92,7 @@ options: - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. type: str default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] + choices: ["us/las", "de/fra", "de/fkb"] assign_public_ip: description: - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. @@ -106,47 +105,46 @@ options: default: 1 subscription_user: description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. type: str subscription_password: description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. type: str wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning. type: bool default: true wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 remove_boot_volume: description: - - remove the bootVolume of the virtual machine you're destroying. + - Remove the bootVolume of the virtual machine you are destroying. type: bool default: true state: description: - - create or terminate instances + - Create or terminate instances. - 'The choices available are: V(running), V(stopped), V(absent), V(present).' type: str default: 'present' disk_type: description: - - the type of disk to be allocated. + - The type of disk to be allocated. 
type: str choices: [SSD, HDD] default: HDD requirements: - - "profitbricks" + - "profitbricks" author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' +""" +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Provisioning example @@ -192,7 +190,7 @@ EXAMPLES = ''' - 'web003.stackpointcloud.com' wait_timeout: 500 state: stopped -''' +""" import re import uuid diff --git a/plugins/modules/profitbricks_datacenter.py b/plugins/modules/profitbricks_datacenter.py index 4aa1fa5eeb..93c27b5d8d 100644 --- a/plugins/modules/profitbricks_datacenter.py +++ b/plugins/modules/profitbricks_datacenter.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: profitbricks_datacenter short_description: Create or destroy a ProfitBricks Virtual Datacenter description: - - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency - on profitbricks >= 1.0.0 + - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency + on profitbricks >= 1.0.0. extends_documentation_fragment: - community.general.attributes attributes: @@ -38,41 +37,41 @@ options: type: str required: false default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] + choices: ["us/las", "de/fra", "de/fkb"] subscription_user: description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. type: str required: false subscription_password: description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. 
type: str required: false wait: description: - - wait for the datacenter to be created before returning + - Wait for the datacenter to be created before returning. required: false default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 state: description: - Create or terminate datacenters. - - "The available choices are: V(present), V(absent)." + - 'The available choices are: V(present), V(absent).' type: str required: false default: 'present' -requirements: [ "profitbricks" ] +requirements: ["profitbricks"] author: Matt Baldwin (@baldwinSPC) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a datacenter community.general.profitbricks_datacenter: datacenter: Tardis One @@ -83,7 +82,7 @@ EXAMPLES = ''' datacenter: Tardis One wait_timeout: 500 state: absent -''' +""" import re import time diff --git a/plugins/modules/profitbricks_nic.py b/plugins/modules/profitbricks_nic.py index 9498be15dc..0b559a6fed 100644 --- a/plugins/modules/profitbricks_nic.py +++ b/plugins/modules/profitbricks_nic.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: profitbricks_nic short_description: Create or Remove a NIC description: - - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0 + - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0. extends_documentation_fragment: - community.general.attributes attributes: @@ -39,42 +38,42 @@ options: type: str lan: description: - - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create. + - The LAN to place the NIC on. You can pass a LAN that does not exist and it will be created. Required on create. 
type: str subscription_user: description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. type: str required: true subscription_password: description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. type: str required: true wait: description: - - wait for the operation to complete before returning + - Wait for the operation to complete before returning. required: false default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 state: description: - - Indicate desired state of the resource - - "The available choices are: V(present), V(absent)." + - Indicate desired state of the resource. + - 'The available choices are: V(present), V(absent).' type: str required: false default: 'present' -requirements: [ "profitbricks" ] +requirements: ["profitbricks"] author: Matt Baldwin (@baldwinSPC) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a NIC community.general.profitbricks_nic: datacenter: Tardis One @@ -90,7 +89,7 @@ EXAMPLES = ''' name: 7341c2454f wait_timeout: 500 state: absent -''' +""" import re import uuid diff --git a/plugins/modules/profitbricks_volume.py b/plugins/modules/profitbricks_volume.py index f623da7128..48bacb2061 100644 --- a/plugins/modules/profitbricks_volume.py +++ b/plugins/modules/profitbricks_volume.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: profitbricks_volume short_description: Create or destroy a volume description: - - Allows you to create or remove a volume from a ProfitBricks datacenter. 
This module has a dependency on profitbricks >= 1.0.0 + - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0. extends_documentation_fragment: - community.general.attributes attributes: @@ -42,10 +41,10 @@ options: type: str required: false default: VIRTIO - choices: [ "IDE", "VIRTIO"] + choices: ["IDE", "VIRTIO"] image: description: - - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. + - The system image ID for the volume, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8). This can also be a snapshot image ID. type: str image_password: description: @@ -64,11 +63,11 @@ options: type: str required: false default: HDD - choices: [ "HDD", "SSD" ] + choices: ["HDD", "SSD"] licence_type: description: - The licence type for the volume. This is used when the image is non-standard. - - "The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER)." + - 'The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER).' type: str required: false default: UNKNOWN @@ -85,35 +84,35 @@ options: type: bool instance_ids: description: - - list of instance ids, currently only used when state='absent' to remove instances. + - List of instance ids, currently only used when state='absent' to remove instances. type: list elements: str default: [] subscription_user: description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. type: str required: false subscription_password: description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. type: str required: false wait: description: - - wait for the datacenter to be created before returning + - Wait for the datacenter to be created before returning. 
required: false default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 state: description: - - create or terminate datacenters - - "The available choices are: V(present), V(absent)." + - Create or terminate datacenters. + - 'The available choices are: V(present), V(absent).' type: str required: false default: 'present' @@ -122,11 +121,11 @@ options: - Server name to attach the volume to. type: str -requirements: [ "profitbricks" ] +requirements: ["profitbricks"] author: Matt Baldwin (@baldwinSPC) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create multiple volumes community.general.profitbricks_volume: datacenter: Tardis One @@ -144,7 +143,7 @@ EXAMPLES = ''' - 'vol02' wait_timeout: 500 state: absent -''' +""" import re import time diff --git a/plugins/modules/profitbricks_volume_attachments.py b/plugins/modules/profitbricks_volume_attachments.py index 76459515ee..63ca6775ab 100644 --- a/plugins/modules/profitbricks_volume_attachments.py +++ b/plugins/modules/profitbricks_volume_attachments.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: profitbricks_volume_attachments short_description: Attach or detach a volume description: - - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0 + - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0. extends_documentation_fragment: - community.general.attributes attributes: @@ -36,38 +35,38 @@ options: type: str subscription_user: description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. 
type: str required: false subscription_password: description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. type: str required: false wait: description: - - wait for the operation to complete before returning + - Wait for the operation to complete before returning. required: false default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 state: description: - - Indicate desired state of the resource - - "The available choices are: V(present), V(absent)." + - Indicate desired state of the resource. + - 'The available choices are: V(present), V(absent).' type: str required: false default: 'present' -requirements: [ "profitbricks" ] +requirements: ["profitbricks"] author: Matt Baldwin (@baldwinSPC) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Attach a volume community.general.profitbricks_volume_attachments: datacenter: Tardis One @@ -83,7 +82,7 @@ EXAMPLES = ''' volume: vol01 wait_timeout: 500 state: absent -''' +""" import re import time From d96e56048f1b2e269f3a274fa81be42b77976887 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 10:48:15 +1300 Subject: [PATCH 409/482] [onev ... ovh]*.py: normalize docs (#9373) * [onev ... 
ovh]*.py: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/oneview_datacenter_info.py | 62 +++---- plugins/modules/oneview_enclosure_info.py | 86 +++++---- plugins/modules/oneview_ethernet_network.py | 88 +++++---- .../modules/oneview_ethernet_network_info.py | 68 ++++--- plugins/modules/oneview_fc_network.py | 64 ++++--- plugins/modules/oneview_fc_network_info.py | 44 +++-- plugins/modules/oneview_fcoe_network.py | 66 ++++--- plugins/modules/oneview_fcoe_network_info.py | 44 +++-- .../oneview_logical_interconnect_group.py | 84 +++++---- ...oneview_logical_interconnect_group_info.py | 44 +++-- plugins/modules/oneview_network_set.py | 76 ++++---- plugins/modules/oneview_network_set_info.py | 61 +++--- plugins/modules/oneview_san_manager.py | 73 ++++---- plugins/modules/oneview_san_manager_info.py | 62 +++---- plugins/modules/online_server_info.py | 18 +- plugins/modules/online_user_info.py | 14 +- plugins/modules/open_iscsi.py | 174 +++++++++--------- plugins/modules/openbsd_pkg.py | 120 ++++++------ plugins/modules/opendj_backendprop.py | 157 ++++++++-------- plugins/modules/openwrt_init.py | 76 ++++---- plugins/modules/opkg.py | 113 ++++++------ plugins/modules/osx_defaults.py | 24 ++- plugins/modules/ovh_ip_failover.py | 141 +++++++------- .../modules/ovh_ip_loadbalancing_backend.py | 139 +++++++------- plugins/modules/ovh_monthly_billing.py | 79 ++++---- 25 files changed, 956 insertions(+), 1021 deletions(-) diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py index ed04e2279f..831bd59f61 100644 --- a/plugins/modules/oneview_datacenter_info.py +++ b/plugins/modules/oneview_datacenter_info.py @@ -7,43 +7,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_datacenter_info short_description: Retrieve information about the 
OneView Data Centers description: - - Retrieve information about the OneView Data Centers. + - Retrieve information about the OneView Data Centers. requirements: - - "hpOneView >= 2.0.1" + - "hpOneView >= 2.0.1" author: - - Alex Monteiro (@aalexmonteiro) - - Madhav Bharadwaj (@madhav-bharadwaj) - - Priyanka Sood (@soodpr) - - Ricardo Galeno (@ricardogpsf) + - Alex Monteiro (@aalexmonteiro) + - Madhav Bharadwaj (@madhav-bharadwaj) + - Priyanka Sood (@soodpr) + - Ricardo Galeno (@ricardogpsf) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Data Center name. - type: str - options: - description: - - "Retrieve additional information. Options available: 'visualContent'." - type: list - elements: str + name: + description: + - Data Center name. + type: str + options: + description: + - "Retrieve additional information. Options available: V(visualContent)." + type: list + elements: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Data Centers community.general.oneview_datacenter_info: hostname: 172.16.101.48 @@ -107,19 +105,19 @@ EXAMPLES = ''' - name: Print fetched information about Data Center Visual Content ansible.builtin.debug: msg: "{{ result.datacenter_visual_content }}" -''' +""" -RETURN = ''' +RETURN = r""" datacenters: - description: Has all the OneView information about the Data Centers. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Data Centers. + returned: Always, but can be null. 
+ type: dict datacenter_visual_content: - description: Has information about the Data Center Visual Content. - returned: When requested, but can be null. - type: dict -''' + description: Has information about the Data Center Visual Content. + returned: When requested, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py index 4e203a50ac..21feee769b 100644 --- a/plugins/modules/oneview_enclosure_info.py +++ b/plugins/modules/oneview_enclosure_info.py @@ -8,44 +8,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_enclosure_info short_description: Retrieve information about one or more Enclosures description: - - Retrieve information about one or more of the Enclosures from OneView. + - Retrieve information about one or more of the Enclosures from OneView. requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Enclosure name. - type: str - options: - description: - - "List with options to gather additional information about an Enclosure and related resources. - Options allowed: V(script), V(environmentalConfiguration), and V(utilization). For the option V(utilization), - you can provide specific parameters." - type: list - elements: raw + name: + description: + - Enclosure name. 
+ type: str + options: + description: + - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script), V(environmentalConfiguration), + and V(utilization). For the option V(utilization), you can provide specific parameters.' + type: list + elements: raw extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Enclosures community.general.oneview_enclosure_info: hostname: 172.16.101.48 @@ -98,9 +95,9 @@ EXAMPLES = ''' community.general.oneview_enclosure_info: name: Test-Enclosure options: - - script # optional - - environmentalConfiguration # optional - - utilization # optional + - script # optional + - environmentalConfiguration # optional + - utilization # optional hostname: 172.16.101.48 username: administrator password: my_password @@ -125,12 +122,11 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "{{ result.enclosure_utilization }}" -- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two - specified dates" +- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two specified dates" community.general.oneview_enclosure_info: name: Test-Enclosure options: - - utilization: # optional + - utilization: # optional fields: AmbientTemperature filter: - startDate=2016-07-01T14:29:42.000Z @@ -152,29 +148,29 @@ EXAMPLES = ''' - name: Print fetched information about Enclosure Utilization ansible.builtin.debug: msg: "{{ result.enclosure_utilization }}" -''' +""" -RETURN = ''' +RETURN = r""" enclosures: - description: Has all the OneView information about the Enclosures. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Enclosures. 
+ returned: Always, but can be null. + type: dict enclosure_script: - description: Has all the OneView information about the script of an Enclosure. - returned: When requested, but can be null. - type: str + description: Has all the OneView information about the script of an Enclosure. + returned: When requested, but can be null. + type: str enclosure_environmental_configuration: - description: Has all the OneView information about the environmental configuration of an Enclosure. - returned: When requested, but can be null. - type: dict + description: Has all the OneView information about the environmental configuration of an Enclosure. + returned: When requested, but can be null. + type: dict enclosure_utilization: - description: Has all the OneView information about the utilization of an Enclosure. - returned: When requested, but can be null. - type: dict -''' + description: Has all the OneView information about the utilization of an Enclosure. + returned: When requested, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py index 981d949cdc..823fea3b2c 100644 --- a/plugins/modules/oneview_ethernet_network.py +++ b/plugins/modules/oneview_ethernet_network.py @@ -7,46 +7,44 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_ethernet_network short_description: Manage OneView Ethernet Network resources description: - - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. + - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. 
requirements: - - hpOneView >= 3.1.0 + - hpOneView >= 3.1.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Ethernet Network resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. - - V(default_bandwidth_reset) will reset the network connection template to the default. - type: str - default: present - choices: [present, absent, default_bandwidth_reset] - data: - description: - - List with Ethernet Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Ethernet Network resource. + - V(present) will ensure data properties are compliant with OneView. + - V(absent) will remove the resource from OneView, if it exists. + - V(default_bandwidth_reset) will reset the network connection template to the default. + type: str + default: present + choices: [present, absent, default_bandwidth_reset] + data: + description: + - List with Ethernet Network properties. 
+ type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Ethernet Network is present using the default configuration community.general.oneview_ethernet_network: config: '/etc/oneview/oneview_config.json' @@ -64,8 +62,8 @@ EXAMPLES = ''' name: 'Test Ethernet Network' purpose: Management bandwidth: - maximumBandwidth: 3000 - typicalBandwidth: 2000 + maximumBandwidth: 3000 + typicalBandwidth: 2000 delegate_to: localhost - name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' @@ -107,24 +105,24 @@ EXAMPLES = ''' data: name: 'Test Ethernet Network' delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" ethernet_network: - description: Has the facts about the Ethernet Networks. - returned: On state 'present'. Can be null. - type: dict + description: Has the facts about the Ethernet Networks. + returned: On O(state=present). Can be null. + type: dict ethernet_network_bulk: - description: Has the facts about the Ethernet Networks affected by the bulk insert. - returned: When 'vlanIdRange' attribute is in data argument. Can be null. - type: dict + description: Has the facts about the Ethernet Networks affected by the bulk insert. + returned: When V(vlanIdRange) attribute is in O(data) argument. Can be null. + type: dict ethernet_network_connection_template: - description: Has the facts about the Ethernet Network Connection Template. - returned: On state 'default_bandwidth_reset'. Can be null. - type: dict -''' + description: Has the facts about the Ethernet Network Connection Template. + returned: On O(state=default_bandwidth_reset). Can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py index 7da008b04e..6eb4f46a19 100644 --- a/plugins/modules/oneview_ethernet_network_info.py +++ b/plugins/modules/oneview_ethernet_network_info.py @@ -7,42 +7,40 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_ethernet_network_info short_description: Retrieve the information about one or more of the OneView Ethernet Networks description: - - Retrieve the information about one or more of the Ethernet Networks from OneView. + - Retrieve the information about one or more of the Ethernet Networks from OneView. requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Ethernet Network name. - type: str - options: - description: - - "List with options to gather additional information about an Ethernet Network and related resources. - Options allowed: V(associatedProfiles) and V(associatedUplinkGroups)." - type: list - elements: str + name: + description: + - Ethernet Network name. + type: str + options: + description: + - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed: V(associatedProfiles) + and V(associatedUplinkGroups).' 
+ type: list + elements: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Ethernet Networks community.general.oneview_ethernet_network_info: config: /etc/oneview/oneview_config.json @@ -96,24 +94,24 @@ EXAMPLES = ''' - name: Print fetched information about Ethernet Network Associated Uplink Groups ansible.builtin.debug: msg: "{{ result.enet_associated_uplink_groups }}" -''' +""" -RETURN = ''' +RETURN = r""" ethernet_networks: - description: Has all the OneView information about the Ethernet Networks. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Ethernet Networks. + returned: Always, but can be null. + type: dict enet_associated_profiles: - description: Has all the OneView information about the profiles which are using the Ethernet network. - returned: When requested, but can be null. - type: dict + description: Has all the OneView information about the profiles which are using the Ethernet network. + returned: When requested, but can be null. + type: dict enet_associated_uplink_groups: - description: Has all the OneView information about the uplink sets which are using the Ethernet network. - returned: When requested, but can be null. - type: dict -''' + description: Has all the OneView information about the uplink sets which are using the Ethernet network. + returned: When requested, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py index 9f0c4358b7..312a5dc893 100644 --- a/plugins/modules/oneview_fc_network.py +++ b/plugins/modules/oneview_fc_network.py @@ -7,43 +7,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fc_network short_description: Manage OneView Fibre Channel Network resources description: - - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. + - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. requirements: - - "hpOneView >= 4.0.0" + - "hpOneView >= 4.0.0" author: "Felipe Bulsoni (@fgbulsoni)" attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Fibre Channel Network resource. - V(present) will ensure data properties are compliant with OneView. - V(absent) will remove the resource from OneView, if it exists. - type: str - choices: ['present', 'absent'] - required: true - data: - description: - - List with the Fibre Channel Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Fibre Channel Network resource. + - V(present) will ensure data properties are compliant with OneView. + - V(absent) will remove the resource from OneView, if it exists. + type: str + choices: ['present', 'absent'] + required: true + data: + description: + - List with the Fibre Channel Network properties. 
+ type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Fibre Channel Network is present using the default configuration community.general.oneview_fc_network: config: "{{ config_file_path }}" @@ -75,14 +73,14 @@ EXAMPLES = ''' state: absent data: name: 'New FC Network' -''' +""" -RETURN = ''' +RETURN = r""" fc_network: - description: Has the facts about the managed OneView FC Network. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the managed OneView FC Network. + returned: On O(state=present). Can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py index 096af48308..af20869dc3 100644 --- a/plugins/modules/oneview_fc_network_info.py +++ b/plugins/modules/oneview_fc_network_info.py @@ -7,37 +7,35 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fc_network_info short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks description: - - Retrieve the information about one or more of the Fibre Channel Networks from OneView. + - Retrieve the information about one or more of the Fibre Channel Networks from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Fibre Channel Network name. - type: str + name: + description: + - Fibre Channel Network name. + type: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Fibre Channel Networks community.general.oneview_fc_network_info: config: /etc/oneview/oneview_config.json @@ -73,14 +71,14 @@ EXAMPLES = ''' - name: Print fetched information about Fibre Channel Network found by name ansible.builtin.debug: msg: "{{ result.fc_networks }}" -''' +""" -RETURN = ''' +RETURN = r""" fc_networks: - description: Has all the OneView information about the Fibre Channel Networks. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the Fibre Channel Networks. + returned: Always, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py index e1216b1d95..15128bd372 100644 --- a/plugins/modules/oneview_fcoe_network.py +++ b/plugins/modules/oneview_fcoe_network.py @@ -7,44 +7,42 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fcoe_network short_description: Manage OneView FCoE Network resources description: - - Provides an interface to manage FCoE Network resources. Can create, update, or delete. + - Provides an interface to manage FCoE Network resources. Can create, update, or delete. requirements: - - "Python >= 2.7.9" - - "hpOneView >= 4.0.0" + - "Python >= 2.7.9" + - "hpOneView >= 4.0.0" author: "Felipe Bulsoni (@fgbulsoni)" attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the FCoE Network resource. - V(present) will ensure data properties are compliant with OneView. - V(absent) will remove the resource from OneView, if it exists. - type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with FCoE Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the FCoE Network resource. + - V(present) will ensure data properties are compliant with OneView. + - V(absent) will remove the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with FCoE Network properties. 
+ type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that FCoE Network is present using the default configuration community.general.oneview_fcoe_network: config: '/etc/oneview/oneview_config.json' @@ -72,14 +70,14 @@ EXAMPLES = ''' data: name: New FCoE Network delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" fcoe_network: - description: Has the facts about the OneView FCoE Networks. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the OneView FCoE Networks. + returned: On O(state=present). Can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py index b3460d59aa..6d5074be4a 100644 --- a/plugins/modules/oneview_fcoe_network_info.py +++ b/plugins/modules/oneview_fcoe_network_info.py @@ -7,36 +7,34 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fcoe_network_info short_description: Retrieve the information about one or more of the OneView FCoE Networks description: - - Retrieve the information about one or more of the FCoE Networks from OneView. + - Retrieve the information about one or more of the FCoE Networks from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - FCoE Network name. - type: str + name: + description: + - FCoE Network name. + type: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all FCoE Networks community.general.oneview_fcoe_network_info: config: /etc/oneview/oneview_config.json @@ -72,14 +70,14 @@ EXAMPLES = ''' - name: Print fetched information about FCoE Network found by name ansible.builtin.debug: msg: "{{ result.fcoe_networks }}" -''' +""" -RETURN = ''' +RETURN = r""" fcoe_networks: - description: Has all the OneView information about the FCoE Networks. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the FCoE Networks. + returned: Always, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py index d1303f011a..866dabc6b8 100644 --- a/plugins/modules/oneview_logical_interconnect_group.py +++ b/plugins/modules/oneview_logical_interconnect_group.py @@ -8,45 +8,43 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_logical_interconnect_group short_description: Manage OneView Logical Interconnect Group resources description: - - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. + - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. requirements: - - hpOneView >= 4.0.0 + - hpOneView >= 4.0.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Logical Interconnect Group resource. - V(absent) will remove the resource from OneView, if it exists. - V(present) will ensure data properties are compliant with OneView. - type: str - choices: [absent, present] - default: present - data: - description: - - List with the Logical Interconnect Group properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Logical Interconnect Group resource. + - V(absent) will remove the resource from OneView, if it exists. + - V(present) will ensure data properties are compliant with OneView. 
+ type: str + choices: [absent, present] + default: present + data: + description: + - List with the Logical Interconnect Group properties. + type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Logical Interconnect Group is present community.general.oneview_logical_interconnect_group: config: /etc/oneview/oneview_config.json @@ -57,15 +55,15 @@ EXAMPLES = ''' enclosureType: C7000 interconnectMapTemplate: interconnectMapEntryTemplates: - - logicalDownlinkUri: ~ + - logicalDownlinkUri: logicalLocation: - locationEntries: - - relativeValue: 1 - type: Bay - - relativeValue: 1 - type: Enclosure + locationEntries: + - relativeValue: 1 + type: Bay + - relativeValue: 1 + type: Enclosure permittedInterconnectTypeName: HP VC Flex-10/10D Module - # Alternatively you can inform permittedInterconnectTypeUri + # Alternatively you can inform permittedInterconnectTypeUri delegate_to: localhost - name: Ensure that the Logical Interconnect Group has the specified scopes @@ -95,14 +93,14 @@ EXAMPLES = ''' data: name: New Logical Interconnect Group delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" logical_interconnect_group: - description: Has the facts about the OneView Logical Interconnect Group. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the OneView Logical Interconnect Group. + returned: On O(state=present). Can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py index 6f6a908f29..1c9e415d0e 100644 --- a/plugins/modules/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/oneview_logical_interconnect_group_info.py @@ -8,36 +8,34 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_logical_interconnect_group_info short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups description: - - Retrieve information about one or more of the Logical Interconnect Groups from OneView + - Retrieve information about one or more of the Logical Interconnect Groups from OneView. requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Logical Interconnect Group name. - type: str + name: + description: + - Logical Interconnect Group name. 
+ type: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Logical Interconnect Groups community.general.oneview_logical_interconnect_group_info: hostname: 172.16.101.48 @@ -85,14 +83,14 @@ EXAMPLES = ''' - name: Print fetched information about Logical Interconnect Group found by name ansible.builtin.debug: msg: "{{ result.logical_interconnect_groups }}" -''' +""" -RETURN = ''' +RETURN = r""" logical_interconnect_groups: - description: Has all the OneView information about the Logical Interconnect Groups. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the Logical Interconnect Groups. + returned: Always, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py index 0efd417d63..a7fae51f21 100644 --- a/plugins/modules/oneview_network_set.py +++ b/plugins/modules/oneview_network_set.py @@ -7,46 +7,44 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_network_set short_description: Manage HPE OneView Network Set resources description: - - Provides an interface to manage Network Set resources. Can create, update, or delete. + - Provides an interface to manage Network Set resources. Can create, update, or delete. 
requirements: - - hpOneView >= 4.0.0 + - hpOneView >= 4.0.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Network Set resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. - type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with the Network Set properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Network Set resource. + - V(present) will ensure data properties are compliant with OneView. + - V(absent) will remove the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with the Network Set properties. 
+ type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Network Set community.general.oneview_network_set: config: /etc/oneview/oneview_config.json @@ -54,8 +52,8 @@ EXAMPLES = ''' data: name: OneViewSDK Test Network Set networkUris: - - Test Ethernet Network_1 # can be a name - - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI + - Test Ethernet Network_1 # can be a name + - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI delegate_to: localhost - name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks @@ -74,7 +72,7 @@ EXAMPLES = ''' config: /etc/oneview/oneview_config.json state: absent data: - name: OneViewSDK Test Network Set - Renamed + name: OneViewSDK Test Network Set - Renamed delegate_to: localhost - name: Update the Network set with two scopes @@ -87,14 +85,14 @@ EXAMPLES = ''' - /rest/scopes/01SC123456 - /rest/scopes/02SC123456 delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" network_set: - description: Has the facts about the Network Set. - returned: On state 'present', but can be null. - type: dict -''' + description: Has the facts about the Network Set. + returned: On O(state=present), but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py index cef53d8fcd..f3a4ace3da 100644 --- a/plugins/modules/oneview_network_set_info.py +++ b/plugins/modules/oneview_network_set_info.py @@ -7,45 +7,42 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_network_set_info short_description: Retrieve information about the OneView Network Sets description: - - Retrieve information about the Network Sets from OneView. + - Retrieve information about the Network Sets from OneView. requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Network Set name. - type: str + name: + description: + - Network Set name. + type: str - options: - description: - - "List with options to gather information about Network Set. - Option allowed: V(withoutEthernet). - The option V(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." - type: list - elements: str + options: + description: + - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). The option V(withoutEthernet) retrieves + the list of network_sets excluding Ethernet networks.' 
+ type: list + elements: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Network Sets community.general.oneview_network_set_info: hostname: 172.16.101.48 @@ -86,7 +83,7 @@ EXAMPLES = ''' password: my_password api_version: 500 options: - - withoutEthernet + - withoutEthernet no_log: true delegate_to: localhost register: result @@ -118,7 +115,7 @@ EXAMPLES = ''' api_version: 500 name: Name of the Network Set options: - - withoutEthernet + - withoutEthernet no_log: true delegate_to: localhost register: result @@ -126,14 +123,14 @@ EXAMPLES = ''' - name: Print fetched information about Network Set found by name, excluding Ethernet networks ansible.builtin.debug: msg: "{{ result.network_sets }}" -''' +""" -RETURN = ''' +RETURN = r""" network_sets: - description: Has all the OneView information about the Network Sets. - returned: Always, but can be empty. - type: dict -''' + description: Has all the OneView information about the Network Sets. + returned: Always, but can be empty. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py index 15282aec21..8c03bda463 100644 --- a/plugins/modules/oneview_san_manager.py +++ b/plugins/modules/oneview_san_manager.py @@ -7,47 +7,46 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_san_manager short_description: Manage OneView SAN Manager resources description: - - Provides an interface to manage SAN Manager resources. Can create, update, or delete. + - Provides an interface to manage SAN Manager resources. Can create, update, or delete. 
requirements: - - hpOneView >= 3.1.1 + - hpOneView >= 3.1.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Uplink Set resource. - - V(present) ensures data properties are compliant with OneView. - - V(absent) removes the resource from OneView, if it exists. - - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. - type: str - default: present - choices: [present, absent, connection_information_set] - data: - description: - - List with SAN Manager properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Uplink Set resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(connection_information_set) updates the connection information for the SAN Manager. + This operation is non-idempotent. + type: str + default: present + choices: [present, absent, connection_information_set] + data: + description: + - List with SAN Manager properties. 
+ type: dict + required: true extends_documentation_fragment: - - community.general.oneview - - community.general.oneview.validateetag - - community.general.attributes + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials community.general.oneview_san_manager: config: /etc/oneview/oneview_config.json @@ -123,14 +122,14 @@ EXAMPLES = ''' data: name: '172.18.15.1' delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" san_manager: - description: Has the OneView facts about the SAN Manager. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the OneView facts about the SAN Manager. + returned: On O(state=present). Can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError diff --git a/plugins/modules/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py index f994280ca8..63797e298e 100644 --- a/plugins/modules/oneview_san_manager_info.py +++ b/plugins/modules/oneview_san_manager_info.py @@ -7,44 +7,42 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_san_manager_info short_description: Retrieve information about one or more of the OneView SAN Managers description: - - Retrieve information about one or more of the SAN Managers from OneView + - Retrieve information about one or more of the SAN Managers from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - provider_display_name: - description: - - Provider Display Name. - type: str - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - V(start): The first item to return, using 0-based indexing. - - V(count): The number of resources to return. - - V(query): A general query string to narrow the list of resources returned. - - V(sort): The sort order of the returned data set." - type: dict + provider_display_name: + description: + - Provider Display Name. + type: str + params: + description: + - List of params to delimit, filter and sort the list of resources. + - 'Params allowed:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(query): A general query string to narrow the list of resources returned.' + - 'V(sort): The sort order of the returned data set.' 
+ type: dict extends_documentation_fragment: - community.general.oneview - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all SAN Managers community.general.oneview_san_manager_info: config: /etc/oneview/oneview_config.json @@ -80,14 +78,14 @@ EXAMPLES = ''' - name: Print fetched information about SAN Manager found by provider display name ansible.builtin.debug: msg: "{{ result.san_managers }}" -''' +""" -RETURN = ''' +RETURN = r""" san_managers: - description: Has all the OneView information about the SAN Managers. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the SAN Managers. + returned: Always, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py index f6d03cb275..e36c78ef0e 100644 --- a/plugins/modules/online_server_info.py +++ b/plugins/modules/online_server_info.py @@ -8,23 +8,21 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: online_server_info short_description: Gather information about Online servers description: - Gather information about the servers. - - U(https://www.online.net/en/dedicated-server) + - U(https://www.online.net/en/dedicated-server). 
author: - "Remy Leone (@remyleone)" extends_documentation_fragment: - community.general.online - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Online server information community.general.online_server_info: api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' @@ -32,13 +30,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.online_server_info }}" -''' +""" -RETURN = r''' +RETURN = r""" online_server_info: description: - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." + - 'For more details please refer to: U(https://console.online.net/en/api/).' returned: success type: list elements: dict @@ -130,7 +128,7 @@ online_server_info: "support": "Basic service level" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.online import ( diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py index 1d91418caf..60e0763267 100644 --- a/plugins/modules/online_user_info.py +++ b/plugins/modules/online_user_info.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: online_user_info short_description: Gather information about Online user description: @@ -18,22 +18,22 @@ extends_documentation_fragment: - community.general.online - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Online user info community.general.online_user_info: register: result - ansible.builtin.debug: msg: "{{ result.online_user_info }}" -''' +""" -RETURN = r''' +RETURN = r""" online_user_info: description: - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." 
+ - 'For more details please refer to: U(https://console.online.net/en/api/).' returned: success type: dict sample: @@ -45,7 +45,7 @@ online_user_info: "last_name": "bar", "login": "foobar" } -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.online import ( diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py index df8a694a7e..15e20a241a 100644 --- a/plugins/modules/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -8,103 +8,99 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: open_iscsi author: - - Serge van Ginderachter (@srvg) + - Serge van Ginderachter (@srvg) short_description: Manage iSCSI targets with Open-iSCSI description: - - Discover targets on given portal, (dis)connect targets, mark targets to - manually or auto start, return device nodes of connected targets. + - Discover targets on given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes of connected targets. requirements: - - open_iscsi library and tools (iscsiadm) + - open_iscsi library and tools (iscsiadm) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - portal: - description: - - The domain name or IP address of the iSCSI target. - type: str - aliases: [ ip ] - port: - description: - - The port on which the iSCSI target process listens. - type: str - default: '3260' - target: - description: - - The iSCSI target name. - type: str - aliases: [ name, targetname ] - login: - description: - - Whether the target node should be connected. - - When O(target) is omitted, will login to all available. 
- type: bool - aliases: [ state ] - node_auth: - description: - - The value for C(node.session.auth.authmethod). - type: str - default: CHAP - node_user: - description: - - The value for C(node.session.auth.username). - type: str - node_pass: - description: - - The value for C(node.session.auth.password). - type: str - node_user_in: - description: - - The value for C(node.session.auth.username_in). - type: str - version_added: 3.8.0 - node_pass_in: - description: - - The value for C(node.session.auth.password_in). - type: str - version_added: 3.8.0 - auto_node_startup: - description: - - Whether the target node should be automatically connected at startup. - type: bool - aliases: [ automatic ] - auto_portal_startup: - description: - - Whether the target node portal should be automatically connected at startup. - type: bool - version_added: 3.2.0 - discover: - description: - - Whether the list of target nodes on the portal should be - (re)discovered and added to the persistent iSCSI database. - - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) - to manual, hence combined with O(auto_node_startup=true) will always return - a changed state. - type: bool - default: false - show_nodes: - description: - - Whether the list of nodes in the persistent iSCSI database should be returned by the module. - type: bool - default: false - rescan: - description: - - Rescan an established session for discovering new targets. - - When O(target) is omitted, will rescan all sessions. - type: bool - default: false - version_added: 4.1.0 -''' + portal: + description: + - The domain name or IP address of the iSCSI target. + type: str + aliases: [ip] + port: + description: + - The port on which the iSCSI target process listens. + type: str + default: '3260' + target: + description: + - The iSCSI target name. + type: str + aliases: [name, targetname] + login: + description: + - Whether the target node should be connected. 
+ - When O(target) is omitted, will login to all available. + type: bool + aliases: [state] + node_auth: + description: + - The value for C(node.session.auth.authmethod). + type: str + default: CHAP + node_user: + description: + - The value for C(node.session.auth.username). + type: str + node_pass: + description: + - The value for C(node.session.auth.password). + type: str + node_user_in: + description: + - The value for C(node.session.auth.username_in). + type: str + version_added: 3.8.0 + node_pass_in: + description: + - The value for C(node.session.auth.password_in). + type: str + version_added: 3.8.0 + auto_node_startup: + description: + - Whether the target node should be automatically connected at startup. + type: bool + aliases: [automatic] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 + discover: + description: + - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with O(auto_node_startup=true) + will always return a changed state. + type: bool + default: false + show_nodes: + description: + - Whether the list of nodes in the persistent iSCSI database should be returned by the module. + type: bool + default: false + rescan: + description: + - Rescan an established session for discovering new targets. + - When O(target) is omitted, will rescan all sessions. 
+ type: bool + default: false + version_added: 4.1.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Perform a discovery on sun.com and show available target nodes community.general.open_iscsi: show_nodes: true @@ -144,7 +140,7 @@ EXAMPLES = r''' community.general.open_iscsi: rescan: true target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d -''' +""" import glob import os diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py index 69ac7bff8e..394a5b75e2 100644 --- a/plugins/modules/openbsd_pkg.py +++ b/plugins/modules/openbsd_pkg.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: openbsd_pkg author: - Patrik Lundin (@eest) @@ -21,69 +20,64 @@ description: extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial - version_added: 9.1.0 - details: - - Only works when check mode is not enabled. + check_mode: + support: full + diff_mode: + support: partial + version_added: 9.1.0 + details: + - Only works when check mode is not enabled. options: - name: - description: - - A name or a list of names of the packages. - required: true - type: list - elements: str - state: - description: - - V(present) will make sure the package is installed. - - V(latest) will make sure the latest version of the package is installed. - - V(absent) will make sure the specified package is not installed. - choices: [ absent, latest, present, installed, removed ] - default: present - type: str - build: - description: - - Build the package from source instead of downloading and installing - a binary. Requires that the port source tree is already installed. - Automatically builds and installs the 'sqlports' package, if it is - not already installed. - - Mutually exclusive with O(snapshot). 
- type: bool - default: false - snapshot: - description: - - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. - - Mutually exclusive with O(build). - type: bool - default: false - version_added: 1.3.0 - ports_dir: - description: - - When used in combination with the O(build) option, allows overriding - the default ports source directory. - default: /usr/ports - type: path - clean: - description: - - When updating or removing packages, delete the extra configuration - file(s) in the old packages which are annotated with @extra in - the packaging-list. - type: bool - default: false - quick: - description: - - Replace or delete packages quickly; do not bother with checksums - before removing normal files. - type: bool - default: false + name: + description: + - A name or a list of names of the packages. + required: true + type: list + elements: str + state: + description: + - V(present) will make sure the package is installed. + - V(latest) will make sure the latest version of the package is installed. + - V(absent) will make sure the specified package is not installed. + choices: [absent, latest, present, installed, removed] + default: present + type: str + build: + description: + - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is already installed. + Automatically builds and installs the C(sqlports) package, if it is not already installed. + - Mutually exclusive with O(snapshot). + type: bool + default: false + snapshot: + description: + - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. + - Mutually exclusive with O(build). + type: bool + default: false + version_added: 1.3.0 + ports_dir: + description: + - When used in combination with the O(build) option, allows overriding the default ports source directory. 
+ default: /usr/ports + type: path + clean: + description: + - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated with @extra in the + packaging-list. + type: bool + default: false + quick: + description: + - Replace or delete packages quickly; do not bother with checksums before removing normal files. + type: bool + default: false notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. -''' + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) + option. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure nmap is installed community.general.openbsd_pkg: name: nmap @@ -136,7 +130,7 @@ EXAMPLES = ''' name: qt5 quick: true state: absent -''' +""" import os import platform diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py index fed53532d9..7b48587faf 100644 --- a/plugins/modules/opendj_backendprop.py +++ b/plugins/modules/opendj_backendprop.py @@ -8,94 +8,93 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: opendj_backendprop -short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command +short_description: Will update the backend configuration of OpenDJ using the dsconfig set-backend-prop command description: - - This module will update settings for OpenDJ with the command set-backend-prop. - - It will check first via de get-backend-prop if configuration needs to be applied. + - This module will update settings for OpenDJ with the command set-backend-prop. + - It will check first using de get-backend-prop if configuration needs to be applied. 
author: - - Werner Dijkerman (@dj-wasabi) + - Werner Dijkerman (@dj-wasabi) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - opendj_bindir: - description: - - The path to the bin directory of OpenDJ. - required: false - default: /opt/opendj/bin - type: path - hostname: - description: - - The hostname of the OpenDJ server. - required: true - type: str - port: - description: - - The Admin port on which the OpenDJ instance is available. - required: true - type: str - username: - description: - - The username to connect to. - required: false - default: cn=Directory Manager - type: str - password: - description: - - The password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: str - passwordfile: - description: - - Location to the password file which holds the password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: path - backend: - description: - - The name of the backend on which the property needs to be updated. - required: true - type: str - name: - description: - - The configuration setting to update. - required: true - type: str - value: - description: - - The value for the configuration item. - required: true - type: str - state: - description: - - If configuration needs to be added/updated - required: false - default: "present" - type: str -''' + opendj_bindir: + description: + - The path to the bin directory of OpenDJ. + required: false + default: /opt/opendj/bin + type: path + hostname: + description: + - The hostname of the OpenDJ server. + required: true + type: str + port: + description: + - The Admin port on which the OpenDJ instance is available. + required: true + type: str + username: + description: + - The username to connect to. 
+ required: false + default: cn=Directory Manager + type: str + password: + description: + - The password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: str + passwordfile: + description: + - Location to the password file which holds the password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: path + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + type: str + name: + description: + - The configuration setting to update. + required: true + type: str + value: + description: + - The value for the configuration item. + required: true + type: str + state: + description: + - If configuration needs to be added/updated. + required: false + default: "present" + type: str +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add or update OpenDJ backend properties - action: opendj_backendprop - hostname=localhost - port=4444 - username="cn=Directory Manager" - password=password - backend=userRoot - name=index-entry-limit - value=5000 -''' + opendj_backendprop: + hostname: localhost + port: 4444 + username: "cn=Directory Manager" + password: password + backend: userRoot + name: index-entry-limit + value: 5000 +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py index 46fdea5e27..aa708f08f8 100644 --- a/plugins/modules/openwrt_init.py +++ b/plugins/modules/openwrt_init.py @@ -8,52 +8,50 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: openwrt_init author: - - "Andrew Gaffney (@agaffney)" + - "Andrew Gaffney (@agaffney)" short_description: Manage services on OpenWrt description: - - Controls OpenWrt services on remote hosts. + - Controls OpenWrt services on remote hosts. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the service. - required: true - aliases: ['service'] - state: - type: str - description: - - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. - - V(restarted) will always bounce the service. - - V(reloaded) will always reload. - choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] - enabled: - description: - - Whether the service should start on boot. B(At least one of state and enabled are required.) - type: bool - pattern: - type: str - description: - - If the service does not respond to the 'running' command, name a - substring to look for as would be found in the output of the C(ps) - command as a stand-in for a 'running' result. If the string is found, - the service will be assumed to be running. + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + description: + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. + - V(restarted) will always bounce the service. + - V(reloaded) will always reload. + choices: ['started', 'stopped', 'restarted', 'reloaded'] + enabled: + description: + - Whether the service should start on boot. B(At least one of state and enabled are required). + type: bool + pattern: + type: str + description: + - If the service does not respond to the 'running' command, name a substring to look for as would be found in the output of the C(ps) command + as a stand-in for a 'running' result. If the string is found, the service will be assumed to be running. notes: - - One option other than name is required. + - One option other than O(name) is required. 
requirements: - - An OpenWrt system (with python) -''' + - An OpenWrt system (with python) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start service httpd, if not running community.general.openwrt_init: state: started @@ -73,10 +71,10 @@ EXAMPLES = ''' community.general.openwrt_init: name: httpd enabled: true -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py index 32cb2753f5..40c48f3800 100644 --- a/plugins/modules/opkg.py +++ b/plugins/modules/opkg.py @@ -11,71 +11,68 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: opkg author: "Patrick Pelletier (@skinp)" short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions description: - - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions + - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of package(s) to install/remove. - - C(NAME=VERSION) syntax is also supported to install a package - in a certain version. See the examples. This only works on Yocto based - Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is - supported since community.general 6.2.0. - aliases: [pkg] - required: true - type: list - elements: str - state: - description: - - State of the package. - choices: [ 'present', 'absent', 'installed', 'removed' ] - default: present - type: str - force: - description: - - The C(opkg --force) parameter used. - - State V("") is deprecated and will be removed in community.general 12.0.0. 
Please omit the parameter O(force) to obtain the same behavior. - choices: - - "" - - "depends" - - "maintainer" - - "reinstall" - - "overwrite" - - "downgrade" - - "space" - - "postinstall" - - "remove" - - "checksum" - - "removal-of-dependent-packages" - type: str - update_cache: - description: - - Update the package DB first. - default: false - type: bool - executable: - description: - - The executable location for C(opkg). - type: path - version_added: 7.2.0 + name: + description: + - Name of package(s) to install/remove. + - C(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works on Yocto based Linux + distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0. + aliases: [pkg] + required: true + type: list + elements: str + state: + description: + - State of the package. + choices: ['present', 'absent', 'installed', 'removed'] + default: present + type: str + force: + description: + - The C(opkg --force) parameter used. + - State V("") is deprecated and will be removed in community.general 12.0.0. Please omit the parameter O(force) to obtain the same behavior. + choices: + - "" + - "depends" + - "maintainer" + - "reinstall" + - "overwrite" + - "downgrade" + - "space" + - "postinstall" + - "remove" + - "checksum" + - "removal-of-dependent-packages" + type: str + update_cache: + description: + - Update the package DB first. + default: false + type: bool + executable: + description: + - The executable location for C(opkg). + type: path + version_added: 7.2.0 requirements: - - opkg - - python -''' + - opkg + - python +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install foo community.general.opkg: name: foo @@ -109,9 +106,9 @@ EXAMPLES = ''' name: foo state: present force: overwrite -''' +""" -RETURN = """ +RETURN = r""" version: description: Version of opkg. 
type: str diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py index db5d889a37..1cc541377c 100644 --- a/plugins/modules/osx_defaults.py +++ b/plugins/modules/osx_defaults.py @@ -10,18 +10,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: osx_defaults author: # DO NOT RE-ADD GITHUB HANDLE! -- Franck Nijhof (!UNKNOWN) + - Franck Nijhof (!UNKNOWN) short_description: Manage macOS user defaults description: - - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts. - - macOS applications and other programs use the defaults system to record user preferences and other - information that must be maintained when the applications are not running (such as default font for new - documents, or the position of an Info panel). + - This module allows users to read, write, and delete macOS user defaults from Ansible scripts. + - MacOS applications and other programs use the defaults system to record user preferences and other information that must be maintained when + the applications are not running (such as default font for new documents, or the position of an Info panel). extends_documentation_fragment: - community.general.attributes attributes: @@ -48,7 +46,7 @@ options: description: - The type of value to write. type: str - choices: [ array, bool, boolean, date, float, int, integer, string ] + choices: [array, bool, boolean, date, float, int, integer, string] default: string check_type: description: @@ -72,7 +70,7 @@ options: - The state of the user defaults. - If set to V(list) will query the given parameter specified by O(key). Returns V(null) is nothing found or mis-spelled. type: str - choices: [ absent, list, present ] + choices: [absent, list, present] default: present path: description: @@ -80,10 +78,10 @@ options: type: str default: /usr/bin:/usr/local/bin notes: - - Apple Mac caches defaults. 
You may need to logout and login to apply the changes. -''' + - Apple Mac caches defaults. You may need to logout and login to apply the changes. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set boolean valued key for application domain community.general.osx_defaults: domain: com.apple.Safari @@ -135,7 +133,7 @@ EXAMPLES = r''' domain: com.geekchimp.macable key: ExampleKeyToRemove state: absent -''' +""" from datetime import datetime import re diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py index 58d340e3e9..d69c176fbd 100644 --- a/plugins/modules/ovh_ip_failover.py +++ b/plugins/modules/ovh_ip_failover.py @@ -9,88 +9,79 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ovh_ip_failover short_description: Manage OVH IP failover address description: - - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move - an ip failover (or failover block) between services + - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP failover (or failover + block) between services. author: "Pascal HERAUD (@pascalheraud)" notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). - You have to create an application (a key and secret) with a consumer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) + - Uses the python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with a consumer key as + described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). 
requirements: - - ovh >= 0.4.8 + - ovh >= 0.4.8 extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - required: true - description: - - The IP address to manage (can be a single IP like 1.1.1.1 - or a block like 1.1.1.1/28 ) - type: str - service: - required: true - description: - - The name of the OVH service this IP address should be routed - type: str - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - wait_completion: - required: false - default: true - type: bool - description: - - If true, the module will wait for the IP address to be moved. - If false, exit without waiting. The taskId will be returned - in module output - wait_task_completion: - required: false - default: 0 - description: - - If not 0, the module will wait for this task id to be - completed. Use wait_task_completion if you want to wait for - completion of a previously executed task with - wait_completion=false. You can execute this module repeatedly on - a list of failover IPs using wait_completion=false (see examples) - type: int - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - required: false - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. Default is 120 seconds. - type: int + name: + required: true + description: + - The IP address to manage (can be a single IP like V(1.1.1.1) or a block like V(1.1.1.1/28)). + type: str + service: + required: true + description: + - The name of the OVH service this IP address should be routed. 
+ type: str + endpoint: + required: true + description: + - The endpoint to use (for instance V(ovh-eu)). + type: str + wait_completion: + required: false + default: true + type: bool + description: + - If true, the module will wait for the IP address to be moved. If false, exit without waiting. The taskId will be returned in module output. + wait_task_completion: + required: false + default: 0 + description: + - If not 0, the module will wait for this task id to be completed. Use O(wait_task_completion) if you want to wait for completion of a previously + executed task with O(wait_completion=false). You can execute this module repeatedly on a list of failover IPs using O(wait_completion=false) + (see examples). + type: int + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + required: false + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. Default is 120 seconds. 
+ type: int +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Route an IP address 1.1.1.1 to the service ns666.ovh.net - community.general.ovh_ip_failover: name: 1.1.1.1 @@ -116,10 +107,10 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import time diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py index f70b5804a7..0f0ad2f09e 100644 --- a/plugins/modules/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/ovh_ip_loadbalancing_backend.py @@ -9,85 +9,80 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ovh_ip_loadbalancing_backend short_description: Manage OVH IP LoadBalancing backends description: - - Manage OVH (French European hosting provider) LoadBalancing IP backends + - Manage OVH (French European hosting provider) LoadBalancing IP backends. author: Pascal Heraud (@pascalheraud) notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). - You have to create an application (a key and secret) with a consumer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) + - Uses the python OVH Api U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with a consumer key as + described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). 
requirements: - - ovh > 0.3.5 + - ovh > 0.3.5 extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - required: true - description: - - Name of the LoadBalancing internal name (ip-X.X.X.X) - type: str - backend: - required: true - description: - - The IP address of the backend to update / modify / delete - type: str - state: - default: present - choices: ['present', 'absent'] - description: - - Determines whether the backend is to be created/modified - or deleted - type: str - probe: - default: 'none' - choices: ['none', 'http', 'icmp' , 'oco'] - description: - - Determines the type of probe to use for this backend - type: str - weight: - default: 8 - description: - - Determines the weight for this backend - type: int - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. - type: int + name: + required: true + description: + - Name of the LoadBalancing internal name (ip-X.X.X.X). + type: str + backend: + required: true + description: + - The IP address of the backend to update / modify / delete. + type: str + state: + default: present + choices: ['present', 'absent'] + description: + - Determines whether the backend is to be created/modified or deleted. + type: str + probe: + default: 'none' + choices: ['none', 'http', 'icmp', 'oco'] + description: + - Determines the type of probe to use for this backend. 
+ type: str + weight: + default: 8 + description: + - Determines the weight for this backend. + type: int + endpoint: + required: true + description: + - The endpoint to use ( for instance ovh-eu). + type: str + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. + type: int +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1' ovh_ip_loadbalancing: name: ip-1.1.1.1 @@ -109,10 +104,10 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import time diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py index c2f503e3ad..438bf7db7f 100644 --- a/plugins/modules/ovh_monthly_billing.py +++ b/plugins/modules/ovh_monthly_billing.py @@ -9,52 +9,51 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ovh_monthly_billing author: Francois Lallart (@fraff) version_added: '0.2.0' short_description: Manage OVH monthly billing description: - - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it). -requirements: [ "ovh" ] + - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it). 
+requirements: ["ovh"] extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - project_id: - required: true - type: str - description: - - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET) - instance_id: - required: true - type: str - description: - - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET) - endpoint: - type: str - description: - - The endpoint to use (for instance ovh-eu) - application_key: - type: str - description: - - The applicationKey to use - application_secret: - type: str - description: - - The application secret to use - consumer_key: - type: str - description: - - The consumer key to use -''' + project_id: + required: true + type: str + description: + - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET). + instance_id: + required: true + type: str + description: + - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET). + endpoint: + type: str + description: + - The endpoint to use (for instance V(ovh-eu)). + application_key: + type: str + description: + - The applicationKey to use. + application_secret: + type: str + description: + - The application secret to use. + consumer_key: + type: str + description: + - The consumer key to use. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic usage, using auth from /etc/ovh.conf community.general.ovh_monthly_billing: project_id: 0c727a20aa144485b70c44dee9123b46 @@ -75,10 +74,10 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import traceback From bef82e28a295dcc9f932fe972d1b79f8c2f7d44b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 20:22:24 +1300 Subject: [PATCH 410/482] p[a-e]*: normalize docs (#9372) * p[a-e]*: normalize docs * Update plugins/modules/packet_volume.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/pacemaker_cluster.py | 88 +++---- plugins/modules/packet_device.py | 202 +++++++------- plugins/modules/packet_ip_subnet.py | 65 +++-- plugins/modules/packet_project.py | 33 ++- plugins/modules/packet_sshkey.py | 52 ++-- plugins/modules/packet_volume.py | 58 ++-- plugins/modules/packet_volume_attachment.py | 46 ++-- plugins/modules/pacman.py | 277 ++++++++++---------- plugins/modules/pacman_key.py | 137 +++++----- plugins/modules/pagerduty.py | 138 +++++----- plugins/modules/pagerduty_alert.py | 271 ++++++++++--------- plugins/modules/pagerduty_change.py | 15 +- plugins/modules/pagerduty_user.py | 99 ++++--- plugins/modules/pam_limits.py | 60 ++--- plugins/modules/pamd.py | 97 +++---- plugins/modules/parted.py | 75 +++--- plugins/modules/pear.py | 99 ++++--- 17 files changed, 867 insertions(+), 945 deletions(-) diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py index 60d8656ac3..af8bb5ff56 100644 --- a/plugins/modules/pacemaker_cluster.py +++ b/plugins/modules/pacemaker_cluster.py @@ -8,71 +8,59 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pacemaker_cluster short_description: Manage pacemaker 
clusters author: - Mathieu Bultel (@matbu) description: - - This module can manage a pacemaker cluster and nodes from Ansible using - the pacemaker cli. + - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Indicate desired state of the cluster - choices: [ cleanup, offline, online, restart ] - type: str - node: - description: - - Specify which node of the cluster you want to manage. None == the - cluster status itself, 'all' == check the status of all nodes. - type: str - timeout: - description: - - Timeout when the module should considered that the action has failed - default: 300 - type: int - force: - description: - - Force the change of the cluster state - type: bool - default: true -''' -EXAMPLES = ''' ---- + state: + description: + - Indicate desired state of the cluster. + choices: [cleanup, offline, online, restart] + type: str + node: + description: + - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status of all nodes. + type: str + timeout: + description: + - Timeout when the module should considered that the action has failed. + default: 300 + type: int + force: + description: + - Force the change of the cluster state. + type: bool + default: true +""" + +EXAMPLES = r""" - name: Set cluster Online hosts: localhost gather_facts: false tasks: - - name: Get cluster state - community.general.pacemaker_cluster: - state: online -''' + - name: Get cluster state + community.general.pacemaker_cluster: + state: online +""" -RETURN = ''' -changed: - description: true if the cluster state has changed - type: bool - returned: always +RETURN = r""" out: - description: The output of the current state of the cluster. 
It return a - list of the nodes state. - type: str - sample: 'out: [[" overcloud-controller-0", " Online"]]}' - returned: always -rc: - description: exit code of the module - type: bool - returned: always -''' + description: The output of the current state of the cluster. It returns a list of the nodes state. + type: str + sample: 'out: [[" overcloud-controller-0", " Online"]]}' + returned: always +""" import time diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py index 519a7031e1..13dbbb9ff3 100644 --- a/plugins/modules/packet_device.py +++ b/plugins/modules/packet_device.py @@ -10,26 +10,23 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_device short_description: Manage a bare metal server in the Packet Host description: - - Manage a bare metal server in the Packet Host (a "device" in the API terms). - - When the machine is created it can optionally wait for public IP address, or for active state. - - This module has a dependency on packet >= 1.0. - - API is documented at U(https://www.packet.net/developers/api/devices). - - + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). author: - - Tomas Karasek (@t0mk) - - Matt Baldwin (@baldwinSPC) - - Thibaud Morel l'Horset (@teebes) + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -45,7 +42,7 @@ options: count: description: - - The number of devices to create. Count number can be included in hostname via the %d string formatter. + - The number of devices to create. 
Count number can be included in hostname using the C(%d) string formatter. default: 1 type: int @@ -122,7 +119,7 @@ options: user_data: description: - - Userdata blob made available to the machine + - Userdata blob made available to the machine. type: str wait_for_public_IPv: @@ -130,7 +127,7 @@ options: - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - If set to 4, it will wait until IPv4 is assigned to the instance. - If set to 6, wait until public IPv6 is assigned to the instance. - choices: [4,6] + choices: [4, 6] type: int wait_timeout: @@ -156,11 +153,10 @@ options: requirements: - - "packet-python >= 1.35" + - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in environment variable PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. @@ -169,13 +165,13 @@ EXAMPLES = ''' - name: Create 1 device hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - tags: ci-xyz - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + tags: ci-xyz + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 # Create the same device and wait until it is in state "active", (when it's # ready for other API operations). 
Fail if the device is not "active" in @@ -184,64 +180,64 @@ EXAMPLES = ''' - name: Create device and wait up to 10 minutes for active state hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - wait_timeout: 600 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + wait_timeout: 600 - name: Create 3 ubuntu devices called server-01, server-02 and server-03 hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: server-%02d - count: 3 - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: server-%02d + count: 3 + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 - name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH hosts: localhost tasks: - - name: Create 3 devices and register their facts - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_stable - plan: baremetal_0 - facility: ewr1 - locked: true - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - wait_for_public_IPv: 4 - user_data: | - #cloud-config - ssh_authorized_keys: - - {{ lookup('file', 'my_packet_sshkey') }} - coreos: - etcd: - discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 - addr: $private_ipv4:4001 - peer-addr: $private_ipv4:7001 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd.service - command: start - - name: fleet.service - command: start - register: newhosts + - name: Create 3 devices and register their facts + community.general.packet_device: + hostnames: 
[coreos-one, coreos-two, coreos-three] + operating_system: coreos_stable + plan: baremetal_0 + facility: ewr1 + locked: true + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + wait_for_public_IPv: 4 + user_data: | + #cloud-config + ssh_authorized_keys: + - {{ lookup('file', 'my_packet_sshkey') }} + coreos: + etcd: + discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + register: newhosts - - name: Wait for ssh - ansible.builtin.wait_for: - delay: 1 - host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - with_items: "{{ newhosts.devices }}" + - name: Wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + with_items: "{{ newhosts.devices }}" # Other states of devices @@ -249,38 +245,38 @@ EXAMPLES = ''' - name: Remove 3 devices by uuid hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - state: absent - device_ids: - - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - - 6bb4faf8-a638-4ac7-8f47-86fe514c301f -''' + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + state: absent + device_ids: + - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 + - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 + - 6bb4faf8-a638-4ac7-8f47-86fe514c301f +""" -RETURN = ''' +RETURN = r""" changed: - description: True if a device was altered in any way (created, modified or removed) - type: bool - sample: true - returned: success + description: True if a device was altered in any way (created, modified or removed). 
+ type: bool + sample: true + returned: success devices: - description: Information about each device that was processed - type: list - sample: - - { - "hostname": "my-server.com", - "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", - "public_ipv4": "147.229.15.12", - "private-ipv4": "10.0.15.12", - "tags": [], - "locked": false, - "state": "provisioning", - "public_ipv6": "2604:1380:2:5200::3" - } - returned: success -''' # NOQA + description: Information about each device that was processed + type: list + sample: + - { + "hostname": "my-server.com", + "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", + "public_ipv4": "147.229.15.12", + "private-ipv4": "10.0.15.12", + "tags": [], + "locked": false, + "state": "provisioning", + "public_ipv6": "2604:1380:2:5200::3" + } + returned: success +""" import os diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py index 530cfe3a79..c2c9fcead4 100644 --- a/plugins/modules/packet_ip_subnet.py +++ b/plugins/modules/packet_ip_subnet.py @@ -10,26 +10,24 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_ip_subnet short_description: Assign IP subnet to a bare metal server description: - - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. - - IPv4 subnets must come from already reserved block. - - IPv6 subnets must come from publicly routable /56 block from your project. - - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. - + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. 
version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -77,7 +75,8 @@ options: state: description: - Desired state of the IP subnet on the specified device. - - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified device. + - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified + device. - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified devices. - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it's assigned to. choices: ['present', 'absent'] @@ -85,10 +84,10 @@ options: type: str requirements: - - "packet-python >= 1.35" -''' + - "packet-python >= 1.35" +""" -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. @@ -96,33 +95,33 @@ EXAMPLES = ''' hosts: localhost tasks: - - packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active + - packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active # Pick an IPv4 address from a block allocated to your project. 
- - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostname: myserver - cidr: "147.75.201.78/32" + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostname: myserver + cidr: "147.75.201.78/32" # Release IP address 147.75.201.78 - name: Unassign IP address from any device in your project hosts: localhost tasks: - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - cidr: "147.75.201.78/32" - state: absent -''' + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + cidr: "147.75.201.78/32" + state: absent +""" -RETURN = ''' +RETURN = r""" changed: description: True if an IP address assignments were altered in any way (created or removed). type: bool @@ -140,7 +139,7 @@ subnet: sample: address: 147.75.90.241 address_family: 4 - assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } + assigned_to: {href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0} cidr: 31 created_at: '2017-08-07T15:15:30Z' enabled: true @@ -153,7 +152,7 @@ subnet: network: 147.75.90.240 public: true returned: success -''' +""" import uuid diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py index d8c991dba2..f6acdec152 100644 --- a/plugins/modules/packet_project.py +++ b/plugins/modules/packet_project.py @@ -10,24 +10,22 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_project short_description: Create/delete a project in Packet host description: - - Create/delete a project in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#projects). - + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). 
version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -55,9 +53,9 @@ options: type: str name: - description: - - Name for/of the project. - type: str + description: + - Name for/of the project. + type: str org_id: description: @@ -76,11 +74,10 @@ options: type: str requirements: - - "packet-python >= 1.40" + - "packet-python >= 1.40" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -110,9 +107,9 @@ EXAMPLES = ''' community.general.packet_project: name: "newer project" payment_method: "the other visa" -''' +""" -RETURN = ''' +RETURN = r""" changed: description: True if a project was created or removed. type: bool @@ -128,7 +125,7 @@ id: description: UUID of addressed project. type: str returned: success -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py index 6519735dcc..8172482108 100644 --- a/plugins/modules/packet_sshkey.py +++ b/plugins/modules/packet_sshkey.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_sshkey short_description: Create/delete an SSH key in Packet host description: - - Create/delete an SSH key in Packet host. - - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). + - Create/delete an SSH key in Packet host. + - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). 
author: "Tomas Karasek (@t0mk) " extends_documentation_fragment: - community.general.attributes @@ -26,42 +25,41 @@ attributes: options: state: description: - - Indicate desired state of the target. + - Indicate desired state of the target. default: present choices: ['present', 'absent'] type: str auth_token: description: - - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str label: description: - - Label for the key. If you keep it empty, it will be read from key string. + - Label for the key. If you keep it empty, it will be read from key string. type: str aliases: [name] id: description: - - UUID of the key which you want to remove. + - UUID of the key which you want to remove. type: str fingerprint: description: - - Fingerprint of the key which you want to remove. + - Fingerprint of the key which you want to remove. type: str key: description: - - Public Key string ({type} {base64 encoded key} {description}). + - Public Key string (V({type} {base64 encoded key} {description})). type: str key_file: description: - - File with the public key. + - File with the public key. type: path requirements: - packet-python +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -84,27 +82,27 @@ EXAMPLES = ''' community.general.packet_sshkey: state: absent id: eef49903-7a09-4ca1-af67-4087c29ab5b6 -''' +""" -RETURN = ''' +RETURN = r""" changed: - description: True if a sshkey was created or removed. - type: bool - sample: true - returned: always + description: True if a sshkey was created or removed. + type: bool + sample: true + returned: always sshkeys: description: Information about sshkeys that were created/removed. 
type: list sample: [ - { - "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", - "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", - "label": "mynewkey33" - } + { + "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", + "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", + "key": "ssh-dss AAAAB3NzaC1kc3MAAACBA ... MdDxfmcsCslJKgoRKSmQpCwXQtN2g== user@server", + "label": "mynewkey33" + } ] returned: always -''' # NOQA +""" import os import uuid diff --git a/plugins/modules/packet_volume.py b/plugins/modules/packet_volume.py index 659e8d8aa3..229d63a756 100644 --- a/plugins/modules/packet_volume.py +++ b/plugins/modules/packet_volume.py @@ -9,24 +9,22 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume short_description: Create/delete a volume in Packet host description: - - Create/delete a volume in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#volumes). - + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). 
 version_added: '0.2.0'
 
 author:
-    - Tomas Karasek (@t0mk)
-    - Nurfet Becirevic (@nurfet-becirevic)
+  - Tomas Karasek (@t0mk)
+  - Nurfet Becirevic (@nurfet-becirevic)
 
 extends_documentation_fragment:
-    - community.general.attributes
+  - community.general.attributes
 
 attributes:
   check_mode:
@@ -55,14 +53,13 @@ options:
 
   name:
     description:
-      - Selector for API-generated name of the volume
+      - Selector for API-generated name of the volume.
     type: str
 
   description:
     description:
       - User-defined description attribute for Packet volume.
-      - "It is used used as idempotent identifier - if volume with given
-        description exists, new one is not created."
+      - It is used as idempotent identifier - if volume with given description exists, new one is not created.
     type: str
 
   id:
@@ -72,7 +69,7 @@ options:
 
   plan:
     description:
-      - storage_1 for standard tier, storage_2 for premium (performance) tier.
+      - V(storage_1) for standard tier, V(storage_2) for premium (performance) tier.
       - Tiers are described at U(https://www.packet.com/cloud/storage/).
     choices: ['storage_1', 'storage_2']
     default: 'storage_1'
@@ -91,7 +88,7 @@ options:
 
   locked:
     description:
-      - Create new volume locked.
+      - Create new volume locked.
     type: bool
     default: false
 
@@ -123,10 +120,9 @@ options:
 
 requirements:
   - "packet-python >= 1.35"
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
 # You can also pass the api token in module param auth_token.
 
@@ -154,25 +150,25 @@ EXAMPLES = '''
         id: "{{ result_create.id }}"
         project_id: "{{ project_id }}"
         state: absent
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 id:
-    description: UUID of specified volume
-    type: str
-    returned: success
-    sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+  description: UUID of specified volume.
+  type: str
+  returned: success
+  sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
 
 name:
-    description: The API-generated name of the volume resource.
- type: str - returned: if volume is attached/detached to/from some device - sample: "volume-a91dc506" + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" description: - description: The user-defined description of the volume resource. - type: str - returned: success - sample: "Just another volume" -''' + description: The user-defined description of the volume resource. + type: str + returned: success + sample: "Just another volume" +""" import uuid diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py index a46fef55cb..0423cc879d 100644 --- a/plugins/modules/packet_volume_attachment.py +++ b/plugins/modules/packet_volume_attachment.py @@ -10,27 +10,24 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume_attachment short_description: Attach/detach a volume to a device in the Packet host description: - - Attach/detach a volume to a device in the Packet host. - - API is documented at U(https://www.packet.com/developers/api/volumes/). - - "This module creates the attachment route in the Packet API. In order to discover - the block devices on the server, you have to run the Attach Scripts, - as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." - + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you have to run the Attach + Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). 
version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -61,7 +58,7 @@ options: description: - Selector for the volume. - It can be a UUID, an API-generated volume name, or user-defined description string. - - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' + - 'Example values: V(4a347482-b546-4f67-8300-fb5018ef0c5), V(volume-4a347482), V(my volume).' type: str required: true @@ -69,15 +66,14 @@ options: description: - Selector for the device. - It can be a UUID of the device, or a hostname. - - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device".' type: str requirements: - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -122,19 +118,19 @@ EXAMPLES = ''' volume: "{{ volname }}" device: "{{ devname }}" state: absent -''' +""" -RETURN = ''' +RETURN = r""" volume_id: - description: UUID of volume addressed by the module call. - type: str - returned: success + description: UUID of volume addressed by the module call. + type: str + returned: success device_id: - description: UUID of device addressed by the module call. - type: str - returned: success -''' + description: UUID of device addressed by the module call. 
+ type: str + returned: success +""" import uuid diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py index f13bde317c..a4a9370ae0 100644 --- a/plugins/modules/pacman.py +++ b/plugins/modules/pacman.py @@ -12,172 +12,161 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pacman short_description: Manage packages with I(pacman) description: - - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. author: - - Indrajit Raychaudhuri (@indrajitr) - - Aaron Bull Schaefer (@elasticdog) - - Maxime de Roucy (@tchernomax) - - Jean Raby (@jraby) + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) + - Jean Raby (@jraby) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - name: - description: - - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. - Cannot be used in combination with O(upgrade). - aliases: [ package, pkg ] - type: list - elements: str - - state: - description: - - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. - - V(present) and V(installed) will simply ensure that a desired package is installed. - - V(latest) will update the specified package if it is not of the latest available version. - - V(absent) and V(removed) will remove the specified package. - default: present - choices: [ absent, installed, latest, present, removed ] - type: str - - force: - description: - - When removing packages, forcefully remove them, without any checks. - Same as O(extra_args="--nodeps --nodeps"). 
- - When combined with O(update_cache), force a refresh of all package databases. - Same as O(update_cache_extra_args="--refresh --refresh"). - default: false - type: bool - - remove_nosave: - description: - - When removing packages, do not save modified configuration files as C(.pacsave) files. - (passes C(--nosave) to pacman) - version_added: 4.6.0 - default: false - type: bool - - executable: - description: - - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. - - Pacman compatibility is unfortunately ill defined, in particular, this modules makes - extensive use of the C(--print-format) directive which is known not to be implemented by - some AUR helpers (notably, C(yay)). - - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. - default: pacman - type: str - version_added: 3.1.0 - - extra_args: - description: - - Additional option to pass to pacman when enforcing O(state). - default: '' - type: str - - update_cache: - description: - - Whether or not to refresh the master package lists. - - This can be run as part of a package installation or as a separate step. - - If not specified, it defaults to V(false). - - Please note that this option only had an influence on the module's C(changed) state - if O(name) and O(upgrade) are not specified before community.general 5.0.0. - See the examples for how to keep the old behavior. - type: bool - - update_cache_extra_args: - description: - - Additional option to pass to pacman when enforcing O(update_cache). - default: '' - type: str - - upgrade: - description: - - Whether or not to upgrade the whole system. - Cannot be used in combination with O(name). - - If not specified, it defaults to V(false). - type: bool - - upgrade_extra_args: - description: - - Additional option to pass to pacman when enforcing O(upgrade). - default: '' - type: str - - reason: - description: - - The install reason to set for the packages. 
- choices: [ dependency, explicit ] - type: str - version_added: 5.4.0 - - reason_for: - description: - - Set the install reason for V(all) packages or only for V(new) packages. - - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new). - default: new - choices: [ all, new ] - type: str - version_added: 5.4.0 - -notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. - - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. - For example, a dedicated build user with permissions to install packages could be necessary. - - > - In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages - with the error: C(error: target not found: ). -""" - -RETURN = """ -packages: + name: description: - - A list of packages that have been changed. - - Before community.general 4.5.0 this was only returned when O(upgrade=true). - In community.general 4.5.0, it was sometimes omitted when the package list is empty, - but since community.general 4.6.0 it is always returned when O(name) is specified or - O(upgrade=true). - returned: success and O(name) is specified or O(upgrade=true) + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with O(upgrade). + aliases: [package, pkg] type: list elements: str - sample: [ package, other-package ] -cache_updated: + state: description: - - The changed status of C(pacman -Sy). - - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). - returned: success, when O(update_cache=true) + - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. + - V(present) and V(installed) will simply ensure that a desired package is installed. 
+      - V(latest) will update the specified package if it is not of the latest available version.
+      - V(absent) and V(removed) will remove the specified package.
+    default: present
+    choices: [absent, installed, latest, present, removed]
+    type: str
+
+  force:
+    description:
+      - When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps").
+      - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh --refresh").
+    default: false
     type: bool
-    sample: false
+
+  remove_nosave:
+    description:
+      - When removing packages, do not save modified configuration files as C(.pacsave) files. (passes C(--nosave) to pacman).
     version_added: 4.6.0
+    default: false
+    type: bool
 
-stdout:
+  executable:
     description:
-      - Output from pacman.
-    returned: success, when needed
+      - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
+      - Pacman compatibility is unfortunately ill defined, in particular, this module makes extensive use of the C(--print-format) directive
+        which is known not to be implemented by some AUR helpers (notably, C(yay)).
+      - Beware that AUR helpers might behave unexpectedly and are therefore not recommended.
+    default: pacman
     type: str
-    sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
-    version_added: 4.1.0
+    version_added: 3.1.0
 
-stderr:
+  extra_args:
     description:
-      - Error output from pacman.
-    returned: success, when needed
+      - Additional option to pass to pacman when enforcing O(state).
+    default: ''
     type: str
-    sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
-    version_added: 4.1.0
+
+  update_cache:
+    description:
+      - Whether or not to refresh the master package lists.
+      - This can be run as part of a package installation or as a separate step.
+      - If not specified, it defaults to V(false).
+ - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are not specified before + community.general 5.0.0. See the examples for how to keep the old behavior. + type: bool + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing O(update_cache). + default: '' + type: str + + upgrade: + description: + - Whether or not to upgrade the whole system. Cannot be used in combination with O(name). + - If not specified, it defaults to V(false). + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing O(upgrade). + default: '' + type: str + + reason: + description: + - The install reason to set for the packages. + choices: [dependency, explicit] + type: str + version_added: 5.4.0 + + reason_for: + description: + - Set the install reason for V(all) packages or only for V(new) packages. + - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new). + default: new + choices: [all, new] + type: str + version_added: 5.4.0 + +notes: + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) + option. + - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated build user with + permissions to install packages could be necessary. + - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: C(error: target not + found: ).' """ -EXAMPLES = """ +RETURN = r""" +packages: + description: + - A list of packages that have been changed. + - Before community.general 4.5.0 this was only returned when O(upgrade=true). 
In community.general 4.5.0, it was sometimes omitted when the + package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified or O(upgrade=true). + returned: success and O(name) is specified or O(upgrade=true) + type: list + elements: str + sample: [package, other-package] + +cache_updated: + description: + - The changed status of C(pacman -Sy). + - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). + returned: success, when O(update_cache=true) + type: bool + sample: false + version_added: 4.6.0 + +stdout: + description: + - Output from pacman. + returned: success, when needed + type: str + sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..." + version_added: 4.1.0 + +stderr: + description: + - Error output from pacman. + returned: success, when needed + type: str + sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..." + version_added: 4.1.0 +""" + +EXAMPLES = r""" - name: Install package foo from repo community.general.pacman: name: foo diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py index 4b7b2639ec..f98fb6f8a3 100644 --- a/plugins/modules/pacman_key.py +++ b/plugins/modules/pacman_key.py @@ -8,84 +8,83 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pacman_key author: - - George Rawlinson (@grawlinson) + - George Rawlinson (@grawlinson) version_added: "3.2.0" short_description: Manage pacman's list of trusted keys description: - - Add or remove gpg keys from the pacman keyring. + - Add or remove gpg keys from the pacman keyring. notes: - - Use full-length key ID (40 characters). - - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden. - - Keys will be locally signed after being imported into the keyring. 
- - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified. - - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. + - Use full-length key ID (40 characters). + - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden. + - Keys will be locally signed after being imported into the keyring. + - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified. + - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. requirements: - - gpg - - pacman-key + - gpg + - pacman-key extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - id: - description: - - The 40 character identifier of the key. - - Including this allows check mode to correctly report the changed state. - - Do not specify a subkey ID, instead specify the primary key ID. - required: true - type: str - data: - description: - - The keyfile contents to add to the keyring. - - Must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - file: - description: - - The path to a keyfile on the remote server to add to the keyring. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: path - url: - description: - - The URL to retrieve keyfile from. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - keyserver: - description: - - The keyserver used to retrieve key from. - type: str - verify: - description: - - Whether or not to verify the keyfile's key ID against specified key ID. - type: bool - default: true - force_update: - description: - - This forces the key to be updated if it already exists in the keyring. - type: bool - default: false - keyring: - description: - - The full path to the keyring folder on the remote server. 
- - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)). - - Useful if the remote system requires an alternative gnupg directory. - type: path - default: /etc/pacman.d/gnupg - state: - description: - - Ensures that the key is present (added) or absent (revoked). - default: present - choices: [ absent, present ] - type: str -''' + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is present (added) or absent (revoked). 
+ default: present + choices: [absent, present] + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Import a key via local file community.general.pacman_key: id: 01234567890ABCDE01234567890ABCDE12345678 @@ -119,9 +118,9 @@ EXAMPLES = ''' community.general.pacman_key: id: 01234567890ABCDE01234567890ABCDE12345678 state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import os.path import tempfile diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py index 853bd6d797..8d83374c34 100644 --- a/plugins/modules/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -9,84 +9,82 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pagerduty short_description: Create PagerDuty maintenance windows description: - - This module will let you create PagerDuty maintenance windows + - This module will let you create PagerDuty maintenance windows. author: - - "Andrew Newdigate (@suprememoocow)" - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" - - "Bruce Pennypacker (@bpennypacker)" + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" + - "Bruce Pennypacker (@bpennypacker)" requirements: - - PagerDuty API access + - PagerDuty API access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - choices: [ "running", "started", "ongoing", "absent" ] - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - user: - type: str - description: - - PagerDuty user ID. Obsolete. Please, use O(token) for authorization. 
- token: - type: str - description: - - A pagerduty token, generated on the pagerduty site. It is used for authorization. - required: true - requester_id: - type: str - description: - - ID of user making the request. Only needed when creating a maintenance_window. - service: - type: list - elements: str - description: - - A comma separated list of PagerDuty service IDs. - aliases: [ services ] - window_id: - type: str - description: - - ID of maintenance window. Only needed when absent a maintenance_window. - hours: - type: str - description: - - Length of maintenance window in hours. - default: '1' - minutes: - type: str - description: - - Maintenance window in minutes (this is added to the hours). - default: '0' - desc: - type: str - description: - - Short description of maintenance window. - default: Created by Ansible - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: true -''' + state: + type: str + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + choices: ["running", "started", "ongoing", "absent"] + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + user: + type: str + description: + - PagerDuty user ID. Obsolete. Please, use O(token) for authorization. + token: + type: str + description: + - A pagerduty token, generated on the pagerduty site. It is used for authorization. + required: true + requester_id: + type: str + description: + - ID of user making the request. Only needed when creating a maintenance_window. + service: + type: list + elements: str + description: + - A comma separated list of PagerDuty service IDs. + aliases: [services] + window_id: + type: str + description: + - ID of maintenance window. Only needed when absent a maintenance_window. 
+ hours: + type: str + description: + - Length of maintenance window in hours. + default: '1' + minutes: + type: str + description: + - Maintenance window in minutes (this is added to the hours). + default: '0' + desc: + type: str + description: + - Short description of maintenance window. + default: Created by Ansible + validate_certs: + description: + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List ongoing maintenance windows using a token community.general.pagerduty: name: companyabc @@ -143,7 +141,7 @@ EXAMPLES = ''' token: yourtoken state: absent window_id: "{{ pd_window.result.maintenance_windows[0].id }}" -''' +""" import datetime import json diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py index 3c0327e5ab..050dcd17e9 100644 --- a/plugins/modules/pagerduty_alert.py +++ b/plugins/modules/pagerduty_alert.py @@ -8,150 +8,149 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pagerduty_alert short_description: Trigger, acknowledge or resolve PagerDuty incidents description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events + - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events. author: - - "Amanpreet Singh (@ApsOps)" - - "Xiao Shen (@xshen1)" + - "Amanpreet Singh (@ApsOps)" + - "Xiao Shen (@xshen1)" requirements: - - PagerDuty API access + - PagerDuty API access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. 
It is not used with PagerDuty REST v2 API. - api_key: - type: str - description: - - The pagerduty API key (readonly access), generated on the pagerduty site. - - Required if O(api_version=v1). - integration_key: - type: str - description: - - The GUID of one of your 'Generic API' services. - - This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service. - service_id: - type: str - description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. - - Required if O(api_version=v1). - service_key: - type: str - description: - - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). - state: - type: str - description: - - Type of event to be sent. - required: true - choices: - - 'triggered' - - 'acknowledged' - - 'resolved' - api_version: - type: str - description: - - The API version we want to use to run the module. - - V1 is more limited with option we can provide to trigger incident. - - V2 has more variables for example, O(severity), O(source), O(custom_details), etc. - default: 'v1' - choices: - - 'v1' - - 'v2' - version_added: 7.4.0 - client: - type: str - description: - - The name of the monitoring client that is triggering this event. - required: false - client_url: - type: str - description: - - The URL of the monitoring client that is triggering this event. - required: false - component: - type: str - description: - - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). - required: false - version_added: 7.4.0 - custom_details: - type: dict - description: - - Additional details about the event and affected system. - - A dictionary with custom keys and values. - required: false - version_added: 7.4.0 - desc: - type: str - description: - - For O(state=triggered) - Required. Short description of the problem that led to this trigger. 
This field (or a truncated version) - will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. - The maximum length is 1024 characters. - - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event. - required: false - default: Created via Ansible - incident_class: - type: str - description: - - The class/type of the event, for example C(ping failure) or C(cpu load). - required: false - version_added: 7.4.0 - incident_key: - type: str - description: - - Identifies the incident to which this O(state) should be applied. - - For O(state=triggered) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup' - problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty. - - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened by a - trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. - required: false - link_url: - type: str - description: - - Relevant link url to the alert. For example, the website or the job link. - required: false - version_added: 7.4.0 - link_text: - type: str - description: - - A short description of the link_url. - required: false - version_added: 7.4.0 - source: - type: str - description: - - The unique location of the affected system, preferably a hostname or FQDN. - - Required in case of O(state=trigger) and O(api_version=v2). - required: false - version_added: 7.4.0 - severity: - type: str - description: - - The perceived severity of the status the event is describing with respect to the affected system. 
- - Required in case of O(state=trigger) and O(api_version=v2). - default: 'critical' - choices: - - 'critical' - - 'warning' - - 'error' - - 'info' - version_added: 7.4.0 -''' + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + api_key: + type: str + description: + - The pagerduty API key (readonly access), generated on the pagerduty site. + - Required if O(api_version=v1). + integration_key: + type: str + description: + - The GUID of one of your 'Generic API' services. + - This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service. + service_id: + type: str + description: + - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. + - Required if O(api_version=v1). + service_key: + type: str + description: + - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). + state: + type: str + description: + - Type of event to be sent. + required: true + choices: + - 'triggered' + - 'acknowledged' + - 'resolved' + api_version: + type: str + description: + - The API version we want to use to run the module. + - V1 is more limited with option we can provide to trigger incident. + - V2 has more variables for example, O(severity), O(source), O(custom_details) and so on. + default: 'v1' + choices: + - 'v1' + - 'v2' + version_added: 7.4.0 + client: + type: str + description: + - The name of the monitoring client that is triggering this event. + required: false + client_url: + type: str + description: + - The URL of the monitoring client that is triggering this event. + required: false + component: + type: str + description: + - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). + required: false + version_added: 7.4.0 + custom_details: + type: dict + description: + - Additional details about the event and affected system. + - A dictionary with custom keys and values. 
+ required: false + version_added: 7.4.0 + desc: + type: str + description: + - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will + be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The + maximum length is 1024 characters. + - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event. + required: false + default: Created via Ansible + incident_class: + type: str + description: + - The class/type of the event, for example C(ping failure) or C(cpu load). + required: false + version_added: 7.4.0 + incident_key: + type: str + description: + - Identifies the incident to which this O(state) should be applied. + - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one will be created. If there is already an + open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup' + problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty. + - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened + by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + required: false + link_url: + type: str + description: + - Relevant link URL to the alert. For example, the website or the job link. + required: false + version_added: 7.4.0 + link_text: + type: str + description: + - A short description of the O(link_url). + required: false + version_added: 7.4.0 + source: + type: str + description: + - The unique location of the affected system, preferably a hostname or FQDN. + - Required in case of O(state=trigger) and O(api_version=v2). 
+ required: false + version_added: 7.4.0 + severity: + type: str + description: + - The perceived severity of the status the event is describing with respect to the affected system. + - Required in case of O(state=trigger) and O(api_version=v2). + default: 'critical' + choices: + - 'critical' + - 'warning' + - 'error' + - 'info' + version_added: 7.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Trigger an incident with just the basic options community.general.pagerduty_alert: name: companyabc @@ -226,7 +225,7 @@ EXAMPLES = ''' integration_key: xxx incident_key: somekey state: resolved -''' +""" import json from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py index acd31fb447..39353f7575 100644 --- a/plugins/modules/pagerduty_change.py +++ b/plugins/modules/pagerduty_change.py @@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: pagerduty_change short_description: Track a code or infrastructure change as a PagerDuty change event version_added: 1.3.0 @@ -31,8 +31,7 @@ attributes: options: integration_key: description: - - The integration key that identifies the service the change was made to. - This can be found by adding an integration to a service in PagerDuty. + - The integration key that identifies the service the change was made to. This can be found by adding an integration to a service in PagerDuty. required: true type: str summary: @@ -82,14 +81,14 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled sites using + self-signed certificates. 
required: false default: true type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Track the deployment as a PagerDuty change event community.general.pagerduty_change: integration_key: abc123abc123abc123abc123abc123ab @@ -106,7 +105,7 @@ EXAMPLES = ''' environment: production link_url: https://github.com/ansible-collections/community.general/pull/1269 link_text: View changes on GitHub -''' +""" from ansible.module_utils.urls import fetch_url from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py index eb8a309562..e03342c792 100644 --- a/plugins/modules/pagerduty_user.py +++ b/plugins/modules/pagerduty_user.py @@ -8,64 +8,63 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pagerduty_user short_description: Manage a user account on PagerDuty description: - - This module manages the creation/removal of a user account on PagerDuty. + - This module manages the creation/removal of a user account on PagerDuty. version_added: '1.3.0' author: Zainab Alsaffar (@zanssa) requirements: - - pdpyras python module = 4.1.1 - - PagerDuty API Access + - pdpyras python module = 4.1.1 + - PagerDuty API Access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - access_token: - description: - - An API access token to authenticate with the PagerDuty REST API. - required: true - type: str - pd_user: - description: - - Name of the user in PagerDuty. - required: true - type: str - pd_email: - description: - - The user's email address. - - O(pd_email) is the unique identifier used and cannot be updated using this module. - required: true - type: str - pd_role: - description: - - The user's role. 
- choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] - default: 'responder' - type: str - state: - description: - - State of the user. - - On V(present), it creates a user if the user doesn't exist. - - On V(absent), it removes a user if the account exists. - choices: ['present', 'absent'] - default: 'present' - type: str - pd_teams: - description: - - The teams to which the user belongs. - - Required if O(state=present). - type: list - elements: str -''' + access_token: + description: + - An API access token to authenticate with the PagerDuty REST API. + required: true + type: str + pd_user: + description: + - Name of the user in PagerDuty. + required: true + type: str + pd_email: + description: + - The user's email address. + - O(pd_email) is the unique identifier used and cannot be updated using this module. + required: true + type: str + pd_role: + description: + - The user's role. + choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] + default: 'responder' + type: str + state: + description: + - State of the user. + - On V(present), it creates a user if the user does not exist. + - On V(absent), it removes a user if the account exists. + choices: ['present', 'absent'] + default: 'present' + type: str + pd_teams: + description: + - The teams to which the user belongs. + - Required if O(state=present). 
+ type: list + elements: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user account on PagerDuty community.general.pagerduty_user: access_token: 'Your_Access_token' @@ -81,9 +80,9 @@ EXAMPLES = r''' pd_user: user_full_name pd_email: user_email state: "absent" -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from os import path from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py index 4ed037a6ff..516b61fec1 100644 --- a/plugins/modules/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -8,11 +8,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pam_limits author: -- "Sebastien Rohaut (@usawa)" + - "Sebastien Rohaut (@usawa)" short_description: Modify Linux PAM limits description: - The M(community.general.pam_limits) module modifies PAM limits. @@ -38,32 +37,32 @@ options: description: - Limit type, see C(man 5 limits.conf) for an explanation. required: true - choices: [ "hard", "soft", "-" ] + choices: ["hard", "soft", "-"] limit_item: type: str description: - The limit to be set. required: true choices: - - "core" - - "data" - - "fsize" - - "memlock" - - "nofile" - - "rss" - - "stack" - - "cpu" - - "nproc" - - "as" - - "maxlogins" - - "maxsyslogins" - - "priority" - - "locks" - - "sigpending" - - "msgqueue" - - "nice" - - "rtprio" - - "chroot" + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" value: type: str description: @@ -74,24 +73,21 @@ options: required: true backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. 
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. required: false type: bool default: false use_min: description: - If set to V(true), the minimal value will be used or conserved. - - If the specified value is inferior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content is not modified. required: false type: bool default: false use_max: description: - If set to V(true), the maximal value will be used or conserved. - - If the specified value is superior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If the specified value is superior to the value in the file, file content is replaced with the new value, else content is not modified. required: false type: bool default: false @@ -109,9 +105,9 @@ options: default: '' notes: - If O(dest) file does not exist, it is created. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or modify nofile soft limit for the user joe community.general.pam_limits: domain: joe @@ -141,7 +137,7 @@ EXAMPLES = r''' limit_type: hard limit_item: nofile value: 39693561 -''' +""" import os import re diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py index 0ad4c8787e..6502922bc1 100644 --- a/plugins/modules/pamd.py +++ b/plugins/modules/pamd.py @@ -9,15 +9,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: pamd author: - - Kenneth D. Evensen (@kevensen) + - Kenneth D. Evensen (@kevensen) short_description: Manage PAM Modules description: - Edit PAM service's type, control, module path and module arguments. - - In order for a PAM rule to be modified, the type, control and - module_path must match an existing rule. 
See man(5) pam.d for details. + - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d for details. notes: - This module does not handle authselect profiles. extends_documentation_fragment: @@ -30,8 +29,7 @@ attributes: options: name: description: - - The name generally refers to the PAM service file to - change, for example system-auth. + - The name generally refers to the PAM service file to change, for example system-auth. type: str required: true type: @@ -40,12 +38,11 @@ options: - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] control: description: - The control of the PAM rule being modified. - - This may be a complicated control with brackets. If this is the case, be - sure to put "[bracketed controls]" in quotes. + - This may be a complicated control with brackets. If this is the case, be sure to put "[bracketed controls]" in quotes. - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true @@ -57,55 +54,49 @@ options: required: true new_type: description: - - The new type to assign to the new rule. + - The new type to assign to the new rule. type: str - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] new_control: description: - - The new control to assign to the new rule. + - The new control to assign to the new rule. type: str new_module_path: description: - - The new module path to be assigned to the new rule. + - The new module path to be assigned to the new rule. 
type: str module_arguments: description: - - When O(state=updated), the O(module_arguments) will replace existing module_arguments. - - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed. - - When O(state=args_present) any args listed in O(module_arguments) are added if - missing from the existing rule. - - Furthermore, if the module argument takes a value denoted by C(=), - the value will be changed to that specified in module_arguments. + - When O(state=updated), the O(module_arguments) will replace existing module_arguments. + - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed. + - When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), the value will be changed to that specified in module_arguments. type: list elements: str state: description: - - The default of V(updated) will modify an existing rule if type, - control and module_path all match an existing rule. - - With V(before), the new rule will be inserted before a rule matching type, - control and module_path. - - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, - control and module_path. - - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. - - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored. - - State V(absent) will remove the rule. + - The default of V(updated) will modify an existing rule if type, control and module_path all match an existing rule. + - With V(before), the new rule will be inserted before a rule matching type, control and module_path. + - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, control and module_path. 
+ - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. + - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored. + - State V(absent) will remove the rule. type: str - choices: [ absent, before, after, args_absent, args_present, updated ] + choices: [absent, before, after, args_absent, args_present, updated] default: updated path: description: - - This is the path to the PAM service files. + - This is the path to the PAM service files. type: path default: /etc/pam.d backup: - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - type: bool - default: false -''' + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Update pamd rule's control in /etc/pam.d/system-auth community.general.pamd: name: system-auth @@ -133,8 +124,7 @@ EXAMPLES = r''' new_module_path: pam_faillock.so state: before -- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \ - existing rule pam_rootok.so +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so community.general.pamd: name: su type: auth @@ -193,8 +183,8 @@ EXAMPLES = r''' control: '[success=1 default=ignore]' module_path: pam_succeed_if.so module_arguments: - - crond - - quiet + - crond + - quiet state: args_present - name: Module arguments requiring commas must be listed as a Yaml list @@ -204,7 +194,7 @@ EXAMPLES = r''' control: required module_path: pam_access.so module_arguments: - - listsep=, + - listsep=, state: args_present - name: Update specific argument value in a rule @@ -226,21 +216,20 @@ EXAMPLES = r''' type: auth module_path: pam_sss.so control: 'requisite' -''' 
+""" -RETURN = r''' +RETURN = r""" change_count: - description: How many rules were changed. - type: int - sample: 1 - returned: success + description: How many rules were changed. + type: int + sample: 1 + returned: success backupdest: - description: - - "The file name of the backup file, if created." - returned: success - type: str -... -''' + description: + - The file name of the backup file, if created. + returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py index b3616a8ecd..43c34ff9e5 100644 --- a/plugins/modules/parted.py +++ b/plugins/modules/parted.py @@ -9,21 +9,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Fabrizio Colonna (@ColOfAbRiX) + - Fabrizio Colonna (@ColOfAbRiX) module: parted short_description: Configure block device partitions description: - - This module allows configuring block device partition using the C(parted) - command line tool. For a full description of the fields and the options - check the GNU parted manual. + - This module allows configuring block device partition using the C(parted) command line tool. For a full description of the fields and the + options check the GNU parted manual. requirements: - This module requires C(parted) version 1.8.3 and above. - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above. - - If the version of C(parted) is below 3.1, it requires a Linux version running - the C(sysfs) file system C(/sys/). + - If the version of C(parted) is below 3.1, it requires a Linux version running the C(sysfs) file system C(/sys/). - Requires the C(resizepart) command when using the O(resize) parameter. extends_documentation_fragment: - community.general.attributes @@ -36,15 +33,14 @@ options: device: description: - The block device (disk) where to operate. 
- - Regular files can also be partitioned, but it is recommended to create a - loopback device using C(losetup) to easily access its partitions. + - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily access its partitions. type: str required: true align: description: - Set alignment for newly created partitions. Use V(undefined) for parted default alignment. type: str - choices: [ cylinder, minimal, none, optimal, undefined ] + choices: [cylinder, minimal, none, optimal, undefined] default: optimal number: description: @@ -53,46 +49,40 @@ options: type: int unit: description: - - Selects the current default unit that Parted will use to display - locations and capacities on the disk and to interpret those given by the - user if they are not suffixed by an unit. + - Selects the current default unit that Parted will use to display locations and capacities on the disk and to interpret those given by + the user if they are not suffixed by an unit. - When fetching information about a disk, it is recommended to always specify a unit. type: str - choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ] + choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact] default: KiB label: description: - Disk label type or partition table to use. - - If O(device) already contains a different label, it will be changed to O(label) - and any previous partitions will be lost. + - If O(device) already contains a different label, it will be changed to O(label) and any previous partitions will be lost. - A O(name) must be specified for a V(gpt) partition table. type: str - choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ] + choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun] default: msdos part_type: description: - May be specified only with O(label=msdos) or O(label=dvh). - Neither O(part_type) nor O(name) may be used with O(label=sun). 
type: str - choices: [ extended, logical, primary ] + choices: [extended, logical, primary] default: primary part_start: description: - - Where the partition will start as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers - specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, for example V(10GiB), V(15%). + - Where the partition will start as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative + numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%). - Using negative values may require setting of O(fs_type) (see notes). type: str default: 0% part_end: description: - - Where the partition will end as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers + - Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative numbers specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, for example V(10GiB), V(15%). + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%). type: str default: 100% name: @@ -108,7 +98,7 @@ options: - Whether to create or delete a partition. - If set to V(info) the module will only return the device information. 
type: str - choices: [ absent, present, info ] + choices: [absent, present, info] default: info fs_type: description: @@ -124,18 +114,15 @@ options: version_added: '1.3.0' notes: - - When fetching information about a new disk and when the version of parted - installed on the system is before version 3.1, the module queries the kernel - through C(/sys/) to obtain disk information. In this case the units CHS and - CYL are not supported. - - Negative O(part_start) start values were rejected if O(fs_type) was not given. - This bug was fixed in parted 3.2.153. If you want to use negative O(part_start), - specify O(fs_type) as well or make sure your system contains newer parted. -''' + - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, the module queries + the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not supported. + - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If you want to use negative + O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. +""" -RETURN = r''' +RETURN = r""" partition_info: - description: Current partition information + description: Current partition information. returned: success type: complex contains: @@ -146,7 +133,7 @@ partition_info: description: List of device partitions. type: list script: - description: parted script executed by module + description: Parted script executed by module. 
type: str sample: { "disk": { @@ -177,9 +164,9 @@ partition_info: }], "script": "unit KiB print " } -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a new ext4 primary partition community.general.parted: device: /dev/sdb @@ -204,7 +191,7 @@ EXAMPLES = r''' community.general.parted: device: /dev/sdb number: 2 - flags: [ lvm ] + flags: [lvm] state: present part_start: 1GiB @@ -235,7 +222,7 @@ EXAMPLES = r''' part_end: "100%" resize: true state: present -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py index 36770de6c5..ba8f5f9ca2 100644 --- a/plugins/modules/pear.py +++ b/plugins/modules/pear.py @@ -12,54 +12,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pear short_description: Manage pear/pecl packages description: - - Manage PHP packages with the pear package manager. + - Manage PHP packages with the pear package manager. author: - - Jonathan Lestrelin (@jle64) + - Jonathan Lestrelin (@jle64) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the package to install, upgrade, or remove. - required: true - aliases: [pkg] - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "installed", "latest", "absent", "removed"] - executable: - type: path - description: - - Path to the pear executable. - prompts: - description: - - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. - - Prompts will be processed in the same order as the packages list. - - You can optionally specify an answer to any question in the list. 
- - If no answer is provided, the list item will only contain the regular expression. - - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." - - You can provide a list containing items with or without answer. - - A prompt list can be shorter or longer than the packages list but will issue a warning. - - If you want to specify that a package will not need prompts in the middle of a list, V(null). - type: list - elements: raw - version_added: 0.2.0 -''' + name: + type: str + description: + - Name of the package to install, upgrade, or remove. + required: true + aliases: [pkg] + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "installed", "latest", "absent", "removed"] + executable: + type: path + description: + - Path to the pear executable. + prompts: + description: + - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. + - Prompts will be processed in the same order as the packages list. + - You can optionally specify an answer to any question in the list. + - If no answer is provided, the list item will only contain the regular expression. + - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." + - You can provide a list containing items with or without answer. + - A prompt list can be shorter or longer than the packages list but will issue a warning. + - If you want to specify that a package will not need prompts in the middle of a list, V(null). 
+ type: list + elements: raw + version_added: 0.2.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install pear package community.general.pear: name: Net_URL2 @@ -75,19 +74,18 @@ EXAMPLES = r''' name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\] + - (.*)Enable internal debugging in APCu \[no\] - name: Install pecl package with expected prompt and an answer community.general.pear: name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\]: "yes" + - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once with prompts. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additional prompts will be ignored. +- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages order. If there + is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages than prompts, + additional prompts will be ignored. community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -95,10 +93,9 @@ EXAMPLES = r''' - I am a test prompt because gnupg doesnt asks anything - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once skipping the first prompt. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additional prompts will be ignored. +- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order as the packages order. 
+ If there is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages + than prompts, additional prompts will be ignored. community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -115,7 +112,7 @@ EXAMPLES = r''' community.general.pear: name: Net_URL2,pecl/json_post state: absent -''' +""" import os From 9fc3092bb3e8cd7f96dce273b9f19b0efa51bd49 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 20:22:58 +1300 Subject: [PATCH 411/482] s[e-n]*: normalize docs (#9352) * s[e-n]*: normalize docs * Apply suggestions from code review Co-authored-by: Felix Fontein * quote line with : and remove extraneous notes * Update plugins/modules/slack.py Co-authored-by: Felix Fontein * Update plugins/modules/seport.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/sefcontext.py | 94 ++++++------ plugins/modules/selinux_permissive.py | 27 ++-- plugins/modules/selogin.py | 37 +++-- plugins/modules/sendgrid.py | 38 ++--- plugins/modules/sensu_check.py | 65 ++++---- plugins/modules/sensu_client.py | 26 ++-- plugins/modules/sensu_handler.py | 33 ++--- plugins/modules/sensu_silence.py | 30 ++-- plugins/modules/sensu_subscription.py | 35 +++-- plugins/modules/seport.py | 31 ++-- plugins/modules/serverless.py | 26 ++-- plugins/modules/shutdown.py | 33 +++-- plugins/modules/simpleinit_msb.py | 20 +-- plugins/modules/sl_vm.py | 163 ++++++++++---------- plugins/modules/slack.py | 83 +++++------ plugins/modules/slackpkg.py | 64 ++++---- plugins/modules/smartos_image_info.py | 43 +++--- plugins/modules/snap.py | 193 ++++++++++++------------ plugins/modules/snap_alias.py | 71 +++++---- plugins/modules/snmp_facts.py | 204 +++++++++++++------------- 20 files changed, 627 insertions(+), 689 deletions(-) diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py index 19c128fa7b..eeba491f5d 100644 --- 
a/plugins/modules/sefcontext.py +++ b/plugins/modules/sefcontext.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sefcontext short_description: Manages SELinux file context mapping definitions description: @@ -28,85 +27,82 @@ attributes: options: target: description: - - Target path (expression). + - Target path (expression). type: str required: true - aliases: [ path ] + aliases: [path] ftype: description: - - The file type that should have SELinux contexts applied. - - "The following file type options are available:" - - V(a) for all files, - - V(b) for block devices, - - V(c) for character devices, - - V(d) for directories, - - V(f) for regular files, - - V(l) for symbolic links, - - V(p) for named pipes, - - V(s) for socket files. + - The file type that should have SELinux contexts applied. + - 'The following file type options are available:' + - V(a) for all files, + - V(b) for block devices, + - V(c) for character devices, + - V(d) for directories, + - V(f) for regular files, + - V(l) for symbolic links, + - V(p) for named pipes, + - V(s) for socket files. type: str - choices: [ a, b, c, d, f, l, p, s ] + choices: [a, b, c, d, f, l, p, s] default: a setype: description: - - SELinux type for the specified O(target). + - SELinux type for the specified O(target). type: str substitute: description: - - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent to this path. - - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. + - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent + to this path. 
+ - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. version_added: 6.4.0 type: str - aliases: [ equal ] + aliases: [equal] seuser: description: - - SELinux user for the specified O(target). - - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts. + - SELinux user for the specified O(target). + - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts. type: str selevel: description: - - SELinux range for the specified O(target). - - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts. + - SELinux range for the specified O(target). + - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts. type: str - aliases: [ serange ] + aliases: [serange] state: description: - - Whether the SELinux file context must be V(absent) or V(present). - - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings that match O(target). + - Whether the SELinux file context must be V(absent) or V(present). + - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings that match O(target). type: str - choices: [ absent, present ] + choices: [absent, present] default: present reload: description: - - Reload SELinux policy after commit. - - Note that this does not apply SELinux file contexts to existing files. + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. type: bool default: true ignore_selinux_state: description: - - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + - Useful for scenarios (chrooted environment) that you cannot get the real SELinux state. 
type: bool default: false notes: -- The changes are persistent across reboots. -- O(setype) and O(substitute) are mutually exclusive. -- If O(state=present) then one of O(setype) or O(substitute) is mandatory. -- The M(community.general.sefcontext) module does not modify existing files to the new - SELinux context(s), so it is advisable to first create the SELinux - file contexts before creating files, or run C(restorecon) manually - for the existing files that require the new SELinux file contexts. -- Not applying SELinux fcontexts to existing files is a deliberate - decision as it would be unclear what reported changes would entail - to, and there's no guarantee that applying SELinux fcontext does - not pick up other unrelated prior changes. + - The changes are persistent across reboots. + - O(setype) and O(substitute) are mutually exclusive. + - If O(state=present) then one of O(setype) or O(substitute) is mandatory. + - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable to first create + the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files that require the new SELinux file contexts. + - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what reported changes would entail to, and + there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. requirements: -- libselinux-python -- policycoreutils-python + - libselinux-python + - policycoreutils-python author: -- Dag Wieers (@dagwieers) -''' + - Dag Wieers (@dagwieers) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow apache to modify files in /srv/git_repos community.general.sefcontext: target: '/srv/git_repos(/.*)?' 
@@ -132,11 +128,11 @@ EXAMPLES = r''' - name: Apply new SELinux file context to filesystem ansible.builtin.command: restorecon -irv /srv/git_repos -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import traceback diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py index 80439e1de7..b5c0ee4a61 100644 --- a/plugins/modules/selinux_permissive.py +++ b/plugins/modules/selinux_permissive.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: selinux_permissive short_description: Change permissive domain in SELinux policy description: @@ -25,20 +24,20 @@ attributes: options: domain: description: - - The domain that will be added or removed from the list of permissive domains. + - The domain that will be added or removed from the list of permissive domains. type: str required: true - aliases: [ name ] + aliases: [name] permissive: description: - - Indicate if the domain should or should not be set as permissive. + - Indicate if the domain should or should not be set as permissive. type: bool required: true no_reload: description: - - Disable reloading of the SELinux policy after making change to a domain's permissive setting. - - The default is V(false), which causes policy to be reloaded when a domain changes state. - - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6." + - Disable reloading of the SELinux policy after making change to a domain's permissive setting. + - The default is V(false), which causes policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.". 
type: bool default: false store: @@ -47,18 +46,18 @@ options: type: str default: '' notes: - - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). -requirements: [ policycoreutils-python ] + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). +requirements: [policycoreutils-python] author: -- Michael Scherer (@mscherer) -''' + - Michael Scherer (@mscherer) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Change the httpd_t domain to permissive community.general.selinux_permissive: name: httpd_t permissive: true -''' +""" import traceback diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py index 57482b0908..8f1b20c230 100644 --- a/plugins/modules/selogin.py +++ b/plugins/modules/selogin.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: selogin short_description: Manages linux user to SELinux user mapping description: - - Manages linux user to SELinux user mapping + - Manages linux user to SELinux user mapping. extends_documentation_fragment: - community.general.attributes attributes: @@ -25,15 +24,15 @@ options: login: type: str description: - - a Linux user + - A Linux user. required: true seuser: type: str description: - - SELinux user name + - SELinux user name. selevel: type: str - aliases: [ serange ] + aliases: [serange] description: - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. default: s0 @@ -42,7 +41,7 @@ options: description: - Desired mapping value. default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] reload: description: - Reload SELinux policy after commit. @@ -50,20 +49,20 @@ options: default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. 
type: bool default: false notes: - - The changes are persistent across reboots - - Not tested on any debian based system -requirements: [ 'libselinux', 'policycoreutils' ] + - The changes are persistent across reboots. + - Not tested on any debian based system. +requirements: ['libselinux', 'policycoreutils'] author: -- Dan Keder (@dankeder) -- Petr Lautrbach (@bachradsusi) -- James Cassell (@jamescassell) -''' + - Dan Keder (@dankeder) + - Petr Lautrbach (@bachradsusi) + - James Cassell (@jamescassell) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Modify the default user on the system to the guest_u user community.general.selogin: login: __default__ @@ -82,11 +81,11 @@ EXAMPLES = ''' login: '%engineering' seuser: staff_u state: present -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import traceback diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py index b4f6b6eaff..c964fa8c23 100644 --- a/plugins/modules/sendgrid.py +++ b/plugins/modules/sendgrid.py @@ -9,21 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sendgrid short_description: Sends an email with the SendGrid API description: - - "Sends an email with a SendGrid account through their API, not through - the SMTP service." + - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - "This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails." - - "Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need an active SendGrid - account." - - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers - you must pip install sendgrid" + - This module is non-idempotent because it sends an email through the external API. 
It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you will need an active SendGrid account. + - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install sendgrid). requirements: - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) extends_documentation_fragment: @@ -98,9 +92,9 @@ options: - The e-mail body content. required: true author: "Matt Makai (@makaimc)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Send an email to a single recipient that the deployment was successful community.general.sendgrid: username: "{{ sendgrid_username }}" @@ -114,16 +108,16 @@ EXAMPLES = r''' - name: Send an email to more than one recipient that the build failed community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "build@mycompany.com" - to_addresses: - - "ops@mycompany.com" - - "devteam@mycompany.com" - subject: "Build failure!." - body: "Unable to pull source repository from Git server." + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." 
delegate_to: localhost -''' +""" # ======================================= # sendgrid module support methods diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py index 1430d6a6ce..018bb75db3 100644 --- a/plugins/modules/sensu_check.py +++ b/plugins/modules/sensu_check.py @@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_check short_description: Manage Sensu checks description: - Manage the checks that should be run on a machine by I(Sensu). - Most options do not have a default and will not be added to the check definition unless specified. - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, - - they are simply specified for your convenience. + they are simply specified for your convenience. extends_documentation_fragment: - community.general.attributes attributes: @@ -29,127 +28,127 @@ options: name: type: str description: - - The name of the check - - This is the key that is used to determine whether a check exists + - The name of the check. + - This is the key that is used to determine whether a check exists. required: true state: type: str description: - - Whether the check should be present or not - choices: [ 'present', 'absent' ] + - Whether the check should be present or not. + choices: ['present', 'absent'] default: present path: type: str description: - Path to the json file of the check to be added/removed. - Will be created if it does not exist (unless O(state=absent)). - - The parent folders need to exist when O(state=present), otherwise an error will be thrown + - The parent folders need to exist when O(state=present), otherwise an error will be thrown. default: /etc/sensu/conf.d/checks.json backup: description: - Create a backup file (if yes), including the timestamp information so - - you can get the original file back if you somehow clobbered it incorrectly. 
+ you can get the original file back if you somehow clobbered it incorrectly. type: bool default: false command: type: str description: - - Path to the sensu check to run (not required when O(state=absent)) + - Path to the sensu check to run (not required when O(state=absent)). handlers: type: list elements: str description: - - List of handlers to notify when the check fails + - List of handlers to notify when the check fails. subscribers: type: list elements: str description: - - List of subscribers/channels this check should run for - - See sensu_subscribers to subscribe a machine to a channel + - List of subscribers/channels this check should run for. + - See sensu_subscribers to subscribe a machine to a channel. interval: type: int description: - - Check interval in seconds + - Check interval in seconds. timeout: type: int description: - - Timeout for the check + - Timeout for the check. - If not specified, it defaults to 10. ttl: type: int description: - - Time to live in seconds until the check is considered stale + - Time to live in seconds until the check is considered stale. handle: description: - - Whether the check should be handled or not + - Whether the check should be handled or not. - Default is V(false). type: bool subdue_begin: type: str description: - - When to disable handling of check failures + - When to disable handling of check failures. subdue_end: type: str description: - - When to enable handling of check failures + - When to enable handling of check failures. dependencies: type: list elements: str description: - - Other checks this check depends on, if dependencies fail handling of this check will be disabled + - Other checks this check depends on, if dependencies fail handling of this check will be disabled. metric: description: - - Whether the check is a metric + - Whether the check is a metric. 
type: bool default: false standalone: description: - - Whether the check should be scheduled by the sensu client or server - - This option obviates the need for specifying the O(subscribers) option + - Whether the check should be scheduled by the sensu client or server. + - This option obviates the need for specifying the O(subscribers) option. - Default is V(false). type: bool publish: description: - Whether the check should be scheduled at all. - - You can still issue it via the sensu api + - You can still issue it via the sensu API. - Default is V(false). type: bool occurrences: type: int description: - - Number of event occurrences before the handler should take action + - Number of event occurrences before the handler should take action. - If not specified, defaults to 1. refresh: type: int description: - - Number of seconds handlers should wait before taking second action + - Number of seconds handlers should wait before taking second action. aggregate: description: - Classifies the check as an aggregate check, - - making it available via the aggregate API + making it available via the aggregate API. - Default is V(false). type: bool low_flap_threshold: type: int description: - - The low threshold for flap detection + - The low threshold for flap detection. high_flap_threshold: type: int description: - - The high threshold for flap detection + - The high threshold for flap detection. custom: type: dict description: - A hash/dictionary of custom parameters for mixing to the configuration. - - You can't rewrite others module parameters using this + - You can't rewrite others module parameters using this. source: type: str description: - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). 
author: "Anders Ingemann (@andsens)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Fetch metrics about the CPU load every 60 seconds, # the sensu server has a handler called 'relay' which forwards stats to graphite - name: Get cpu metrics @@ -177,7 +176,7 @@ EXAMPLES = ''' community.general.sensu_check: name: check_disk_capacity state: absent -''' +""" import json import traceback diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py index eca0804b0a..0de0340f33 100644 --- a/plugins/modules/sensu_client.py +++ b/plugins/modules/sensu_client.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_client author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu client configuration @@ -27,8 +26,8 @@ options: state: type: str description: - - Whether the client should be present or not - choices: [ 'present', 'absent' ] + - Whether the client should be present or not. + choices: ['present', 'absent'] default: present name: type: str @@ -49,7 +48,8 @@ options: - The subscriptions array items must be strings. safe_mode: description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the + check. type: bool default: false redact: @@ -99,11 +99,9 @@ options: type: dict description: - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). 
-notes: - - Check mode is supported -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Minimum possible configuration - name: Configure Sensu client community.general.sensu_client: @@ -146,20 +144,20 @@ EXAMPLES = ''' - name: Delete the Sensu client configuration community.general.sensu_client: state: "absent" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective client configuration, when state is present + description: Effective client configuration, when state is present. returned: success type: dict sample: {'name': 'client', 'subscriptions': ['default']} file: - description: Path to the client configuration file + description: Path to the client configuration file. returned: success type: str sample: "/etc/sensu/conf.d/client.json" -''' +""" import json import os diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py index bbb8dc6129..bd1014f2dd 100644 --- a/plugins/modules/sensu_handler.py +++ b/plugins/modules/sensu_handler.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_handler author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu handler configuration description: - - Manages Sensu handler configuration + - Manages Sensu handler configuration. - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' extends_documentation_fragment: - community.general.attributes @@ -27,8 +26,8 @@ options: state: type: str description: - - Whether the handler should be present or not - choices: [ 'present', 'absent' ] + - Whether the handler should be present or not. + choices: ['present', 'absent'] default: present name: type: str @@ -38,8 +37,8 @@ options: type: type: str description: - - The handler type - choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] + - The handler type. 
+ choices: ['pipe', 'tcp', 'udp', 'transport', 'set'] filter: type: str description: @@ -98,12 +97,10 @@ options: elements: str description: - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' -notes: - - Check mode is supported -''' + - 'NOTE: the handlers attribute is only required for handler sets (that is, handlers configured with O(type=set)).' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Configure a handler that sends event data as STDIN (pipe) - name: Configure IRC Sensu handler community.general.sensu_handler: @@ -146,25 +143,25 @@ EXAMPLES = ''' owner: "sensu" group: "sensu" mode: "0600" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective handler configuration, when state is present + description: Effective handler configuration, when state is present. returned: success type: dict sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} file: - description: Path to the handler configuration file + description: Path to the handler configuration file. returned: success type: str sample: "/etc/sensu/conf.d/handlers/irc.json" name: - description: Name of the handler + description: Name of the handler. returned: success type: str sample: "irc" -''' +""" import json import os diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py index 25dfc239eb..bcb70f9bd1 100644 --- a/plugins/modules/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_silence author: Steven Bambling (@smbambling) short_description: Manage Sensu silence entries description: - - Create and clear (delete) a silence entries via the Sensu API - for subscriptions and checks. 
+ - Create and clear (delete) a silence entries via the Sensu API for subscriptions and checks. extends_documentation_fragment: - community.general.attributes attributes: @@ -36,30 +34,26 @@ options: expire: type: int description: - - If specified, the silence entry will be automatically cleared - after this number of seconds. + - If specified, the silence entry will be automatically cleared after this number of seconds. expire_on_resolve: description: - - If specified as true, the silence entry will be automatically - cleared once the condition it is silencing is resolved. + - If specified as true, the silence entry will be automatically cleared once the condition it is silencing is resolved. type: bool reason: type: str description: - - If specified, this free-form string is used to provide context or - rationale for the reason this silence entry was created. + - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was created. state: type: str description: - - Specifies to create or clear (delete) a silence entry via the Sensu API + - Specifies to create or clear (delete) a silence entry via the Sensu API. default: present choices: ['present', 'absent'] subscription: type: str description: - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to client name. - Example - C(client:server1.example.dev) + - To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev). required: true url: type: str @@ -67,9 +61,9 @@ options: - Specifies the URL of the Sensu monitoring host server. 
required: false default: http://127.0.01:4567 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Silence ALL checks for a given client - name: Silence server1.example.dev community.general.sensu_silence: @@ -98,10 +92,10 @@ EXAMPLES = ''' reason: "{{ item.value.reason }}" creator: "{{ ansible_user_id }}" with_dict: "{{ silence }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py index 0077e2ffa6..e7c78c3290 100644 --- a/plugins/modules/sensu_subscription.py +++ b/plugins/modules/sensu_subscription.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_subscription short_description: Manage Sensu subscriptions description: - - Manage which I(sensu channels) a machine should subscribe to + - Manage which I(sensu channels) a machine should subscribe to. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,41 +25,41 @@ options: name: type: str description: - - The name of the channel + - The name of the channel. required: true state: type: str description: - - Whether the machine should subscribe or unsubscribe from the channel - choices: [ 'present', 'absent' ] + - Whether the machine should subscribe or unsubscribe from the channel. + choices: ['present', 'absent'] required: false default: present path: type: str description: - - Path to the subscriptions json file + - Path to the subscriptions json file. required: false default: /etc/sensu/conf.d/subscriptions.json backup: description: - Create a backup file (if yes), including the timestamp information so you - - can get the original file back if you somehow clobbered it incorrectly. + can get the original file back if you somehow clobbered it incorrectly. 
type: bool required: false default: false -requirements: [ ] +requirements: [] author: Anders Ingemann (@andsens) -''' +""" -RETURN = ''' +RETURN = r""" reasons: - description: the reasons why the module changed or did not change something - returned: success - type: list - sample: ["channel subscription was absent and state is `present'"] -''' + description: The reasons why the module changed or did not change something. + returned: success + type: list + sample: ["channel subscription was absent and state is `present'"] +""" -EXAMPLES = ''' +EXAMPLES = r""" # Subscribe to the nginx channel - name: Subscribe to nginx checks community.general.sensu_subscription: name=nginx @@ -68,7 +67,7 @@ EXAMPLES = ''' # Unsubscribe from the common checks channel - name: Unsubscribe from common checks community.general.sensu_subscription: name=common state=absent -''' +""" import json import traceback diff --git a/plugins/modules/seport.py b/plugins/modules/seport.py index 964e8f0eda..24311fc56d 100644 --- a/plugins/modules/seport.py +++ b/plugins/modules/seport.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: seport short_description: Manages SELinux network port type definitions description: - - Manages SELinux network port type definitions. + - Manages SELinux network port type definitions. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -34,7 +33,7 @@ options: - Protocol for the specified port. type: str required: true - choices: [ tcp, udp ] + choices: [tcp, udp] setype: description: - SELinux type for the specified port. @@ -44,7 +43,7 @@ options: description: - Desired boolean value. 
type: str - choices: [ absent, present ] + choices: [absent, present] default: present reload: description: @@ -53,26 +52,26 @@ options: default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. type: bool default: false local: description: - - Work with local modifications only. + - Work with local modifications only. type: bool default: false version_added: 5.6.0 notes: - - The changes are persistent across reboots. - - Not tested on any debian based system. + - The changes are persistent across reboots. + - Not tested on any Debian based system. requirements: -- libselinux-python -- policycoreutils-python + - libselinux-python + - policycoreutils-python author: -- Dan Keder (@dankeder) -''' + - Dan Keder (@dankeder) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow Apache to listen on tcp port 8888 community.general.seport: ports: 8888 @@ -110,7 +109,7 @@ EXAMPLES = r''' setype: ssh_port_t state: absent local: true -''' +""" import traceback diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py index 8aa9396d62..937f7dcdea 100644 --- a/plugins/modules/serverless.py +++ b/plugins/modules/serverless.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: serverless short_description: Manages a Serverless Framework project description: @@ -26,11 +25,11 @@ options: description: - Goal state of given stage/project. type: str - choices: [ absent, present ] + choices: [absent, present] default: present serverless_bin_path: description: - - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless + - The path of a serverless framework binary relative to the O(service_path), for example V(node_module/.bin/serverless). 
type: path service_path: description: @@ -67,16 +66,15 @@ options: type: bool default: false notes: - - Currently, the C(serverless) command must be in the path of the node executing the task. - In the future this may be a flag. + - Currently, the C(serverless) command must be in the path of the node executing the task. In the future this may be a flag. requirements: -- serverless -- yaml + - serverless + - PyYAML author: -- Ryan Scott Brown (@ryansb) -''' + - Ryan Scott Brown (@ryansb) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic deploy of a service community.general.serverless: service_path: '{{ project_dir }}' @@ -103,9 +101,9 @@ EXAMPLES = r''' region: us-east-1 service_path: '{{ project_dir }}' serverless_bin_path: node_modules/.bin/serverless -''' +""" -RETURN = r''' +RETURN = r""" service_name: type: str description: The service name specified in the serverless.yml that was just deployed. @@ -120,7 +118,7 @@ command: description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module. returned: always sample: serverless deploy --stage production -''' +""" import os diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py index d8108425eb..6f2dac14b1 100644 --- a/plugins/modules/shutdown.py +++ b/plugins/modules/shutdown.py @@ -8,14 +8,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: shutdown short_description: Shut down a machine notes: - - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) - to specify locations to search if the default paths do not work. - - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead - the module will attempt to shutdown the system by calling C(systemctl shutdown). + - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. 
Use O(search_paths) to specify locations to search if the + default paths do not work. + - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead the module will attempt + to shutdown the system by calling C(systemctl shutdown). description: - Shut downs a machine. version_added: "1.1.0" @@ -47,20 +47,21 @@ options: search_paths: description: - Paths to search on the remote machine for the C(shutdown) command. - - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) command. + - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) + command. type: list elements: path default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] seealso: -- module: ansible.builtin.reboot + - module: ansible.builtin.reboot author: - - Matt Davis (@nitzmahone) - - Sam Doran (@samdoran) - - Amin Vakil (@aminvakil) -''' + - Matt Davis (@nitzmahone) + - Sam Doran (@samdoran) + - Amin Vakil (@aminvakil) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Unconditionally shut down the machine with all defaults community.general.shutdown: @@ -71,13 +72,13 @@ EXAMPLES = r''' - name: Shut down a machine with shutdown command in unusual place community.general.shutdown: search_paths: - - '/lib/molly-guard' -''' + - '/lib/molly-guard' +""" -RETURN = r''' +RETURN = r""" shutdown: description: V(true) if the machine has been shut down. 
returned: always type: bool sample: true -''' +""" diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py index 92738471c2..2b1b865d2c 100644 --- a/plugins/modules/simpleinit_msb.py +++ b/plugins/modules/simpleinit_msb.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: simpleinit_msb short_description: Manage services on Source Mage GNU/Linux version_added: 7.5.0 @@ -38,24 +37,21 @@ options: state: type: str required: false - choices: [ running, started, stopped, restarted, reloaded ] + choices: [running, started, stopped, restarted, reloaded] description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. V(restarted) will always bounce the - service. V(reloaded) will always reload. + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the service. + V(reloaded) will always reload. - At least one of O(state) and O(enabled) are required. - - Note that V(reloaded) will start the - service if it is not already started, even if your chosen init - system would not normally. + - Note that V(reloaded) will start the service if it is not already started, even if your chosen init system would not normally. enabled: type: bool required: false description: - Whether the service should start on boot. - At least one of O(state) and O(enabled) are required. 
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Example action to start service httpd, if not running community.general.simpleinit_msb: name: httpd @@ -80,7 +76,7 @@ EXAMPLES = ''' community.general.simpleinit_msb: name: httpd enabled: true -''' +""" import os import re diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py index 1604ffc11f..3216fded8b 100644 --- a/plugins/modules/sl_vm.py +++ b/plugins/modules/sl_vm.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sl_vm short_description: Create or cancel a virtual instance in SoftLayer description: @@ -121,7 +120,7 @@ options: disks: description: - List of disk sizes to be assigned to new virtual instance. - default: [ 25 ] + default: [25] type: list elements: int os_code: @@ -159,7 +158,7 @@ options: description: - Create, or cancel a virtual instance. - Specify V(present) for create, V(absent) to cancel. - choices: [ absent, present ] + choices: [absent, present] default: present type: str wait: @@ -173,102 +172,102 @@ options: default: 600 type: int requirements: - - softlayer >= 4.1.1 + - softlayer >= 4.1.1 author: -- Matt Colton (@mcltn) -''' + - Matt Colton (@mcltn) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Build instance hosts: localhost gather_facts: false tasks: - - name: Build instance request - community.general.sl_vm: - hostname: instance-1 - domain: anydomain.com - datacenter: dal09 - tags: ansible-module-test - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: [25] - os_code: UBUNTU_LATEST - wait: false + - name: Build instance request + community.general.sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: false - name: Build 
additional instances hosts: localhost gather_facts: false tasks: - - name: Build instances request - community.general.sl_vm: - hostname: "{{ item.hostname }}" - domain: "{{ item.domain }}" - datacenter: "{{ item.datacenter }}" - tags: "{{ item.tags }}" - hourly: "{{ item.hourly }}" - private: "{{ item.private }}" - dedicated: "{{ item.dedicated }}" - local_disk: "{{ item.local_disk }}" - cpus: "{{ item.cpus }}" - memory: "{{ item.memory }}" - disks: "{{ item.disks }}" - os_code: "{{ item.os_code }}" - ssh_keys: "{{ item.ssh_keys }}" - wait: "{{ item.wait }}" - with_items: - - hostname: instance-2 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: true - - hostname: instance-3 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: true + - name: Build instances request + community.general.sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] 
+ wait: true + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true - name: Cancel instances hosts: localhost gather_facts: false tasks: - - name: Cancel by tag - community.general.sl_vm: - state: absent - tags: ansible-module-test -''' + - name: Cancel by tag + community.general.sl_vm: + state: absent + tags: ansible-module-test +""" # TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. -RETURN = '''# ''' +RETURN = """# """ import json import time diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py index 58893b0f42..b4e637f591 100644 --- a/plugins/modules/slack.py +++ b/plugins/modules/slack.py @@ -15,11 +15,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ +DOCUMENTATION = r""" module: slack short_description: Send Slack notifications description: - - The M(community.general.slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration + - The M(community.general.slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration. author: "Ramon de la Fuente (@ramondelafuente)" extends_documentation_fragment: - community.general.attributes @@ -32,51 +32,42 @@ options: domain: type: str description: - - Slack (sub)domain for your environment without protocol. (For example - V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated and may - be ignored. See token documentation for information. + - Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated + and may be ignored. See token documentation for information. 
token: type: str description: - - Slack integration token. This authenticates you to the slack service. - Make sure to use the correct type of token, depending on what method you use. - - "Webhook token: - Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In - Ansible 1.8 and above, Ansible adapts to the new slack API where tokens look - like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens - are in the new format then slack will ignore any value of domain. If - the token is in the old format the domain is required. Ansible has no - control of when slack will get rid of the old API. When slack does - that the old format will stop working. ** Please keep in mind the tokens - are not the API tokens but are the webhook tokens. In slack these are - found in the webhook URL which are obtained under the apps and integrations. - The incoming webhooks can be added in that area. In some cases this may - be locked by your Slack admin and you must request access. It is there - that the incoming webhooks can be added. The key is on the end of the - URL given to you in that section." - - "WebAPI token: - Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-) - or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. - See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." + - Slack integration token. This authenticates you to the slack service. Make sure to use the correct type of token, depending on what method + you use. + - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, Ansible adapts to the + new slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens are in the new format then slack will + ignore any value of domain. If the token is in the old format the domain is required. 
Ansible has no control of when slack will get rid + of the old API. When slack does that the old format will stop working. ** Please keep in mind the tokens are not the API tokens but are + the webhook tokens. In slack these are found in the webhook URL which are obtained under the apps and integrations. The incoming webhooks + can be added in that area. In some cases this may be locked by your Slack admin and you must request access. It is there that the incoming + webhooks can be added. The key is on the end of the URL given to you in that section.' + - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-) or V(xoxa-), + for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. See Slack's documentation + (U(https://api.slack.com/docs/token-types)) for more information." required: true msg: type: str description: - - Message to send. Note that the module does not handle escaping characters. - Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending. - See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. + - Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands should be converted + to HTML entities (for example C(&) to C(&amp;)) before sending. See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. channel: type: str description: - Channel to send the message to. If absent, the message goes to the channel selected for the O(token). thread_id: description: - - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading + - Optional. Timestamp of parent message to thread this message, see U(https://api.slack.com/docs/message-threading). type: str message_id: description: - Optional. 
Message ID to edit, instead of posting a new message. - - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) from previous task run. + - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) from previous task + run. - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)). - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). type: str @@ -107,14 +98,13 @@ options: parse: type: str description: - - Setting for the message parser at Slack + - Setting for the message parser at Slack. choices: - 'full' - 'none' validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. type: bool default: true color: @@ -140,11 +130,10 @@ options: type: str description: - Setting for automatically prepending a V(#) symbol on the passed in O(channel). - - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). - These prefixes only cover a small set of the prefixes that should not have a V(#) prepended. - Since an exact condition which O(channel) values must not have the V(#) prefix is not known, - the value V(auto) for this option will be deprecated in the future. It is best to explicitly set - O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior. + - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These prefixes only cover + a small set of the prefixes that should not have a V(#) prepended. 
Since an exact condition which O(channel) values must not have the + V(#) prefix is not known, the value V(auto) for this option will be deprecated in the future. It is best to explicitly set O(prepend_hash=always) + or O(prepend_hash=never) to obtain the needed behavior. choices: - 'always' - 'never' @@ -153,7 +142,7 @@ options: version_added: 6.1.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Slack community.general.slack: token: thetoken/generatedby/slack @@ -215,14 +204,14 @@ EXAMPLES = """ Display my system load on host A and B - type: context elements: - - type: mrkdwn - text: |- - *System A* - load average: 0,74, 0,66, 0,63 - - type: mrkdwn - text: |- - *System B* - load average: 5,16, 4,64, 2,43 + - type: mrkdwn + text: |- + *System A* + load average: 0,74, 0,66, 0,63 + - type: mrkdwn + text: |- + *System B* + load average: 5,16, 4,64, 2,43 - name: Send a message with a link using Slack markup community.general.slack: diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py index 9347db1591..2ec91de051 100644 --- a/plugins/modules/slackpkg.py +++ b/plugins/modules/slackpkg.py @@ -15,49 +15,47 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: slackpkg short_description: Package manager for Slackware >= 12.2 description: - - Manage binary packages for Slackware using 'slackpkg' which - is available in versions after 12.2. + - Manage binary packages for Slackware using C(slackpkg) which is available in versions after 12.2. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - name of package to install/remove - required: true - type: list - elements: str - aliases: [pkg] + name: + description: + - Name of package to install/remove. + required: true + type: list + elements: str + aliases: [pkg] - state: - description: - - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). - choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ] - required: false - default: present - type: str + state: + description: + - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). + choices: ['present', 'absent', 'latest', 'installed', 'removed'] + required: false + default: present + type: str - update_cache: - description: - - update the package database first - required: false - default: false - type: bool + update_cache: + description: + - Update the package database first. 
+ required: false + default: false + type: bool author: Kim Nørgaard (@KimNorgaard) -requirements: [ "Slackware >= 12.2" ] -''' +requirements: ["Slackware >= 12.2"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.slackpkg: name: foo @@ -72,7 +70,7 @@ EXAMPLES = ''' community.general.slackpkg: name: foo state: latest -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py index 1a25b46681..96bf9b0575 100644 --- a/plugins/modules/smartos_image_info.py +++ b/plugins/modules/smartos_image_info.py @@ -9,31 +9,28 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: smartos_image_info short_description: Get SmartOS image details description: - - Retrieve information about all installed images on SmartOS. + - Retrieve information about all installed images on SmartOS. author: Adam Števko (@xen0l) extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - filters: - description: - - Criteria for selecting image. Can be any value from image - manifest and C(published_date), C(published), C(source), C(clones), - and C(size). More information can be found at U(https://smartos.org/man/1m/imgadm) - under C(imgadm list). - type: str -''' + filters: + description: + - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source), V(clones), and V(size). 
+ - More information can be found at U(https://smartos.org/man/1m/imgadm) under C(imgadm list). + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Return information about all installed images community.general.smartos_image_info: register: result @@ -49,19 +46,17 @@ EXAMPLES = ''' - name: Print information ansible.builtin.debug: - msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} - has {{ result.smartos_images[item]['clones'] }} VM(s)" + msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} has {{ result.smartos_images[item]['clones'] }} VM(s)" with_items: "{{ result.smartos_images.keys() | list }}" - name: Print information ansible.builtin.debug: - msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} - has {{ smartos_images[item]['clones'] }} VM(s)" + msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} has {{ smartos_images[item]['clones'] }} VM(s)" with_items: "{{ smartos_images.keys() | list }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py index 15637f3315..1de829801d 100644 --- a/plugins/modules/snap.py +++ b/plugins/modules/snap.py @@ -13,96 +13,87 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: snap short_description: Manages snaps description: - - Manages snaps packages. + - Manages snaps packages. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the snaps to be installed. - - Any named snap accepted by the C(snap) command is valid. 
- - > - O(dangerous=true) may be necessary when installing `.snap` files. See O(dangerous) for more details. - required: true - type: list - elements: str - state: - description: - - Desired state of the package. - - > - When O(state=present) the module will use C(snap install) if the snap is not installed, - and C(snap refresh) if it is installed but from a different channel. - default: present - choices: [ absent, present, enabled, disabled ] - type: str - classic: - description: - - Install a snap that has classic confinement. - - This option corresponds to the C(--classic) argument of the C(snap install) command. - - This level of confinement is permissive, granting full system access, - similar to that of traditionally packaged applications that do not use sandboxing mechanisms. - This option can only be specified when the task involves a single snap. - - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. - - type: bool - required: false - default: false - channel: - description: - - Define which release of a snap is installed and tracked for updates. - This option can only be specified if there is a single snap in the task. - - If not passed, the C(snap) command will default to V(stable). - - If the value passed does not contain the C(track), it will default to C(latest). - For example, if V(edge) is passed, the module will assume the channel to be V(latest/edge). - - See U(https://snapcraft.io/docs/channels) for more details about snap channels. - type: str - required: false - options: - description: - - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied - to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will - only be applied to active snaps. - - Options will only be applied when C(state) is set to V(present). 
- This is done after the necessary installation - or refresh (upgrade/downgrade) of all the snaps listed in O(name). - - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. - - required: false - type: list - elements: str - version_added: 4.4.0 - dangerous: - description: - - Install the snap in dangerous mode, without validating its assertions and signatures. - - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. - - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. - type: bool - required: false - default: false - version_added: 7.2.0 + name: + description: + - Name of the snaps to be installed. + - Any named snap accepted by the C(snap) command is valid. + - O(dangerous=true) may be necessary when installing C(.snap) files. See O(dangerous) for more details. + required: true + type: list + elements: str + state: + description: + - Desired state of the package. + - When O(state=present) the module will use C(snap install) if the snap is not installed, and C(snap refresh) if it is installed but from + a different channel. + default: present + choices: [absent, present, enabled, disabled] + type: str + classic: + description: + - Install a snap that has classic confinement. + - This option corresponds to the C(--classic) argument of the C(snap install) command. + - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications that do not + use sandboxing mechanisms. This option can only be specified when the task involves a single snap. + - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. + type: bool + required: false + default: false + channel: + description: + - Define which release of a snap is installed and tracked for updates. 
This option can only be specified if there is a single snap in the + task. + - If not passed, the C(snap) command will default to V(stable). + - If the value passed does not contain the C(track), it will default to C(latest). For example, if V(edge) is passed, the module will assume + the channel to be V(latest/edge). + - See U(https://snapcraft.io/docs/channels) for more details about snap channels. + type: str + required: false + options: + description: + - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied to that snap only. If + the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will only be applied to active snaps. + - Options will only be applied when C(state) is set to V(present). This is done after the necessary installation or refresh (upgrade/downgrade) + of all the snaps listed in O(name). + - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. + required: false + type: list + elements: str + version_added: 4.4.0 + dangerous: + description: + - Install the snap in dangerous mode, without validating its assertions and signatures. + - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. + - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. + type: bool + required: false + default: false + version_added: 7.2.0 notes: - - Privileged operations, such as installing and configuring snaps, require root priviledges. - This is only the case if the user has not logged in to the Snap Store. - + - Privileged operations, such as installing and configuring snaps, require root priviledges. This is only the case if the user has not logged + in to the Snap Store. 
author: - - Victor Carceler (@vcarceler) - - Stanislas Lange (@angristan) + - Victor Carceler (@vcarceler) + - Stanislas Lange (@angristan) seealso: - - module: community.general.snap_alias -''' + - module: community.general.snap_alias +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install "foo" and "bar" snap - name: Install foo community.general.snap: @@ -147,35 +138,35 @@ EXAMPLES = ''' community.general.snap: name: foo channel: latest/edge -''' +""" -RETURN = ''' +RETURN = r""" classic: - description: Whether or not the snaps were installed with the classic confinement - type: bool - returned: When snaps are installed + description: Whether or not the snaps were installed with the classic confinement. + type: bool + returned: When snaps are installed channel: - description: The channel the snaps were installed from - type: str - returned: When snaps are installed + description: The channel the snaps were installed from. + type: str + returned: When snaps are installed cmd: - description: The command that was executed on the host - type: str - returned: When changed is true + description: The command that was executed on the host. + type: str + returned: When changed is true snaps_installed: - description: The list of actually installed snaps - type: list - returned: When any snaps have been installed + description: The list of actually installed snaps. + type: list + returned: When any snaps have been installed snaps_removed: - description: The list of actually removed snaps - type: list - returned: When any snaps have been removed + description: The list of actually removed snaps. + type: list + returned: When any snaps have been removed options_changed: - description: The list of options set/changed in format C(snap:key=value). - type: list - returned: When any options have been changed/set - version_added: 4.4.0 -''' + description: The list of options set/changed in format C(snap:key=value). 
+ type: list + returned: When any options have been changed/set + version_added: 4.4.0 +""" import re import json diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py index ba54a9e155..81a968730d 100644 --- a/plugins/modules/snap_alias.py +++ b/plugins/modules/snap_alias.py @@ -9,46 +9,45 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: snap_alias short_description: Manages snap aliases version_added: 4.0.0 description: - - "Manages snaps aliases." + - Manages snaps aliases. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - state: - description: - - Desired state of the alias. - type: str - choices: [ absent, present ] - default: present - name: - description: - - Name of the snap. - type: str - alias: - description: - - Aliases to be created or removed. - type: list - elements: str - aliases: [aliases] + state: + description: + - Desired state of the alias. + type: str + choices: [absent, present] + default: present + name: + description: + - Name of the snap. + type: str + alias: + description: + - Aliases to be created or removed. 
+ type: list + elements: str + aliases: [aliases] author: - - Alexei Znamensky (@russoz) + - Alexei Znamensky (@russoz) seealso: - - module: community.general.snap -''' + - module: community.general.snap +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install "foo" and "bar" snap - name: Create snap alias community.general.snap_alias: @@ -62,7 +61,7 @@ EXAMPLES = ''' - hw - hw2 - hw3 - state: present # optional + state: present # optional - name: Remove one specific aliases community.general.snap_alias: @@ -73,15 +72,15 @@ EXAMPLES = ''' community.general.snap_alias: name: hello-world state: absent -''' +""" -RETURN = ''' +RETURN = r""" snap_aliases: - description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. - type: list - elements: str - returned: always -''' + description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. + type: list + elements: str + returned: always +""" import re diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index d561f93f02..af0abf9479 100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -9,87 +9,85 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: snmp_facts author: -- Patrick Ogenstad (@ogenstad) + - Patrick Ogenstad (@ogenstad) short_description: Retrieve facts for a device using SNMP description: - - Retrieve facts for a device using SNMP, the facts will be - inserted to the ansible_facts key. + - Retrieve facts for a device using SNMP, the facts will be inserted to the C(ansible_facts) key. 
requirements: - - pysnmp + - pysnmp extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.facts - - community.general.attributes.facts_module + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - host: - description: - - Set to target SNMP server (normally C({{ inventory_hostname }})). - type: str - required: true - version: - description: - - SNMP Version to use, V(v2), V(v2c) or V(v3). - type: str - required: true - choices: [ v2, v2c, v3 ] - community: - description: - - The SNMP community string, required if O(version) is V(v2) or V(v2c). - type: str - level: - description: - - Authentication level. - - Required if O(version=v3). - type: str - choices: [ authNoPriv, authPriv ] - username: - description: - - Username for SNMPv3. - - Required if O(version=v3). - type: str - integrity: - description: - - Hashing algorithm. - - Required if O(version=v3). - type: str - choices: [ md5, sha ] - authkey: - description: - - Authentication key. - - Required O(version=v3). - type: str - privacy: - description: - - Encryption algorithm. - - Required if O(level=authPriv). - type: str - choices: [ aes, des ] - privkey: - description: - - Encryption key. - - Required if O(level=authPriv). - type: str - timeout: - description: - - Response timeout in seconds. - type: int - version_added: 2.3.0 - retries: - description: - - Maximum number of request retries, 0 retries means just a single request. - type: int - version_added: 2.3.0 -''' + host: + description: + - Set to target SNMP server (normally C({{ inventory_hostname }})). 
+ type: str + required: true + version: + description: + - SNMP Version to use, V(v2), V(v2c) or V(v3). + type: str + required: true + choices: [v2, v2c, v3] + community: + description: + - The SNMP community string, required if O(version) is V(v2) or V(v2c). + type: str + level: + description: + - Authentication level. + - Required if O(version=v3). + type: str + choices: [authNoPriv, authPriv] + username: + description: + - Username for SNMPv3. + - Required if O(version=v3). + type: str + integrity: + description: + - Hashing algorithm. + - Required if O(version=v3). + type: str + choices: [md5, sha] + authkey: + description: + - Authentication key. + - Required O(version=v3). + type: str + privacy: + description: + - Encryption algorithm. + - Required if O(level=authPriv). + type: str + choices: [aes, des] + privkey: + description: + - Encryption key. + - Required if O(level=authPriv). + type: str + timeout: + description: + - Response timeout in seconds. + type: int + version_added: 2.3.0 + retries: + description: + - Maximum number of request retries, 0 retries means just a single request. + type: int + version_added: 2.3.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts with SNMP version 2 community.general.snmp_facts: host: '{{ inventory_hostname }}' @@ -108,9 +106,9 @@ EXAMPLES = r''' authkey: abc12345 privkey: def6789 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" ansible_sysdescr: description: A textual description of the entity. 
returned: success @@ -152,39 +150,39 @@ ansible_interfaces: type: dict sample: { "1": { - "adminstatus": "up", - "description": "", - "ifindex": "1", - "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } - ], - "mac": "", - "mtu": "65536", - "name": "lo", - "operstatus": "up", - "speed": "65536" + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" }, "2": { - "adminstatus": "up", - "description": "", - "ifindex": "2", - "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } - ], - "mac": "000a305a52a1", - "mtu": "1500", - "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", - "operstatus": "up", - "speed": "1500" + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" } } -''' +""" import binascii from collections import defaultdict From 6b7ea3443d7f48aec832f781e5fb2a335ff3d466 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 20:24:16 +1300 Subject: [PATCH 412/482] [prox ... pyth]*: normalize docs (#9364) * [prox ... 
pyth]*: normalize docs * Apply suggestions from code review Co-authored-by: IamLunchbox <56757745+IamLunchbox@users.noreply.github.com> * Update plugins/modules/pushbullet.py Co-authored-by: Felix Fontein * Update plugins/modules/pushbullet.py Co-authored-by: Felix Fontein --------- Co-authored-by: IamLunchbox <56757745+IamLunchbox@users.noreply.github.com> Co-authored-by: Felix Fontein --- plugins/modules/proxmox.py | 71 ++++---- plugins/modules/proxmox_backup.py | 53 +++--- plugins/modules/proxmox_disk.py | 60 +++---- plugins/modules/proxmox_domain_info.py | 55 +++--- plugins/modules/proxmox_group_info.py | 51 +++--- plugins/modules/proxmox_kvm.py | 166 +++++++++--------- plugins/modules/proxmox_nic.py | 19 +- plugins/modules/proxmox_node_info.py | 127 +++++++------- plugins/modules/proxmox_pool.py | 11 +- plugins/modules/proxmox_pool_member.py | 9 +- plugins/modules/proxmox_snap.py | 24 +-- .../modules/proxmox_storage_contents_info.py | 7 +- plugins/modules/proxmox_storage_info.py | 29 ++- plugins/modules/proxmox_tasks_info.py | 119 +++++++------ plugins/modules/proxmox_template.py | 15 +- plugins/modules/proxmox_user_info.py | 159 +++++++++-------- plugins/modules/proxmox_vm_info.py | 7 +- plugins/modules/pubnub_blocks.py | 100 ++++------- plugins/modules/pulp_repo.py | 82 ++++----- plugins/modules/puppet.py | 45 +++-- plugins/modules/pushbullet.py | 96 +++++----- plugins/modules/pushover.py | 19 +- plugins/modules/python_requirements_info.py | 38 ++-- 23 files changed, 633 insertions(+), 729 deletions(-) diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 52d5a849f3..3925eec090 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox short_description: Management of instances in Proxmox VE cluster description: @@ -26,28 +25,26 @@ attributes: options: 
password: description: - - the instance root password + - The instance root password. type: str hostname: description: - - the instance hostname - - required only for O(state=present) - - must be unique if vmid is not passed + - The instance hostname. + - Required only for O(state=present). + - Must be unique if vmid is not passed. type: str ostemplate: description: - - the template for VM creating - - required only for O(state=present) + - The template for VM creating. + - Required only for O(state=present). type: str disk: description: - - This option was previously described as "hard disk size in GB for instance" however several formats describing - a lxc mount are permitted. - - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically - choose which storage to allocate from, however new versions enforce the C(:) syntax. - - "Additional options are available by using some combination of the following key-value pairs as a - comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] - [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." + - This option was previously described as "hard disk size in GB for instance" however several formats describing a lxc mount are permitted. + - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically choose which storage to + allocate from, however new versions enforce the C(:) syntax. + - Additional options are available by using some combination of the following key-value pairs as a comma-delimited list C([volume=] + [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=]). - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - This option is mutually exclusive with O(storage) and O(disk_volume). 
type: str @@ -93,19 +90,19 @@ options: type: int cpus: description: - - numbers of allocated cpus for instance + - Number of allocated cpus for instance. type: int memory: description: - - memory size in MB for instance + - Memory size in MB for instance. type: int swap: description: - - swap memory size in MB for instance + - Swap memory size in MB for instance. type: int netif: description: - - specifies network interfaces for the container. As a hash/dictionary defining interfaces. + - Specifies network interfaces for the container. As a hash/dictionary defining interfaces. type: dict features: description: @@ -177,11 +174,11 @@ options: type: dict ip_address: description: - - specifies the address the container will be assigned + - Specifies the address the container will be assigned. type: str onboot: description: - - specifies whether a VM will be started during system bootup + - Specifies whether a VM will be started during system bootup. type: bool storage: description: @@ -199,15 +196,15 @@ options: version_added: 8.1.0 cpuunits: description: - - CPU weight for a VM + - CPU weight for a VM. type: int nameserver: description: - - sets DNS server IP address for a container + - Sets DNS server IP address for a container. type: str searchdomain: description: - - sets DNS search domain for a container + - Sets DNS search domain for a container. type: str tags: description: @@ -219,7 +216,7 @@ options: version_added: 6.2.0 timeout: description: - - timeout for operations + - Timeout for operations. type: int default: 30 update: @@ -232,8 +229,8 @@ options: description: - Forcing operations. - Can be used only with states V(present), V(stopped), V(restarted). - - with O(state=present) force option allow to overwrite existing container. - - with states V(stopped), V(restarted) allow to force stop instance. + - With O(state=present) force option allow to overwrite existing container. + - With states V(stopped), V(restarted) allow to force stop instance. 
type: bool default: false purge: @@ -247,14 +244,14 @@ options: version_added: 2.3.0 state: description: - - Indicate desired state of the instance - - V(template) was added in community.general 8.1.0. + - Indicate desired state of the instance. + - V(template) was added in community.general 8.1.0. type: str choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'template'] default: present pubkey: description: - - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions + - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions. type: str unprivileged: description: @@ -292,8 +289,8 @@ options: - Type of the clone created. - V(full) creates a full clone, and O(storage) must be specified. - V(linked) creates a linked clone, and the cloned container must be a template container. - - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. - O(storage) may be specified, if not it will fall back to the default. + - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. O(storage) may be specified, + if not it will fall back to the default. type: str choices: ['full', 'linked', 'opportunistic'] default: opportunistic @@ -306,9 +303,9 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.proxmox.selection - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create new container with minimal options community.general.proxmox: vmid: 100 @@ -494,8 +491,8 @@ EXAMPLES = r''' hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' features: - - nesting=1 - - mount=cifs,nfs + - nesting=1 + - mount=cifs,nfs - name: > Create a linked clone of the template container with id 100. 
The newly created container with be a @@ -599,7 +596,7 @@ EXAMPLES = r''' api_password: 1q2w3e api_host: node1 state: absent -''' +""" import re import time diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py index 0db2c4ad0e..b14dd529e8 100644 --- a/plugins/modules/proxmox_backup.py +++ b/plugins/modules/proxmox_backup.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_backup author: "Raphael Grieger (@IamLunchbox) " short_description: Start a VM backup in Proxmox VE cluster @@ -42,11 +41,9 @@ options: change_detection_mode: description: - Set the change detection mode (available from Proxmox VE 8.3). - - > - Is only used when backing up containers, - Proxmox silently ignores this option when applied to kvm guests. + - It is only used when backing up containers, Proxmox silently ignores this option when applied to kvm guests. type: str - choices: ["legacy", "data", "metadata"] + choices: ["legacy", "data", "metadata"] compress: description: - Enable additional compression of the backup archive. @@ -63,13 +60,9 @@ options: description: - Specify the description of the backup. - Needs to be a single line, newline and backslash need to be escaped as V(\\n) and V(\\\\) respectively. - - > - If you need variable interpolation, you can set the content as usual - through ansible jinja templating and/or let Proxmox substitute templates. - - > - Proxmox currently supports V({{cluster}}), V({{guestname}}), - V({{node}}), and V({{vmid}}) as templating variables. - Since this is also a jinja delimiter, you need to set these values as raw jinja. + - If you need variable interpolation, you can set the content as usual through ansible jinja templating and/or let Proxmox substitute templates. + - Proxmox currently supports V({{cluster}}), V({{guestname}}), V({{node}}), and V({{vmid}}) as templating variables. 
Since this is also + a jinja delimiter, you need to set these values as raw jinja. default: "{{guestname}}" type: str fleecing: @@ -93,13 +86,13 @@ options: description: - Determine which notification system to use. type: str - choices: ["auto","legacy-sendmail", "notification-system"] + choices: ["auto", "legacy-sendmail", "notification-system"] default: auto performance_tweaks: description: - Enable other performance-related settings. - Must be entered as a string, containing comma separated key-value pairs. - - "For example: V(max-workers=2,pbs-entries-max=2)." + - 'For example: V(max-workers=2,pbs-entries-max=2).' type: str pool: description: @@ -110,19 +103,14 @@ options: protected: description: - Marks backups as protected. - - > - "Might fail, when the PBS backend has verify enabled - due to this bug: U(https://bugzilla.proxmox.com/show_bug.cgi?id=4289)" + - '"Might fail, when the PBS backend has verify enabled due to this bug: U(https://bugzilla.proxmox.com/show_bug.cgi?id=4289)".' type: bool retention: description: - - > - Use custom retention options instead of those from the default cluster - configuration (which is usually V("keep-all")). + - Use custom retention options instead of those from the default cluster configuration (which is usually V("keep-all=1")). - Always requires Datastore.Allocate permission at the storage endpoint. - - > - Specifying a retention time other than V(keep-all=1) might trigger pruning on the datastore, - if an existing backup should be deleted target due to your specified timeframe. + - Specifying a retention time other than V(keep-all=1) might trigger pruning on the datastore, if an existing backup should be deleted + due to your specified timeframe. - Deleting requires C(Datastore.Modify) or C(Datastore.Prune) permissions on the backup storage. 
type: str storage: @@ -153,9 +141,9 @@ extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Backup all vms in the Proxmox cluster to storage mypbs community.general.proxmox_backup: api_user: root@pam @@ -204,9 +192,9 @@ EXAMPLES = r''' vmids: - 100 - 101 -''' +""" -RETURN = r''' +RETURN = r""" backups: description: List of nodes and their task IDs. returned: on success @@ -223,13 +211,12 @@ backups: type: str choices: ["unknown", "success", "failed"] upid: - description: > - Proxmox cluster UPID, which is needed to lookup task info. - Returns OK, when a cluster node did not create a task after being called, - e.g. due to no matching targets. + description: >- + Proxmox cluster UPID, which is needed to lookup task info. Returns OK, when a cluster node did not create a task after being called, for + example due to no matching targets. returned: on success type: str -''' +""" import time diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py index a4a9dd8791..289933915e 100644 --- a/plugins/modules/proxmox_disk.py +++ b/plugins/modules/proxmox_disk.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_disk short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster version_added: 5.7.0 @@ -38,31 +37,21 @@ options: description: - The disk key (V(unused[n]), V(ide[n]), V(sata[n]), V(scsi[n]) or V(virtio[n])) you want to operate on. - Disk buses (IDE, SATA and so on) have fixed ranges of V(n) that accepted by Proxmox API. - - > - For IDE: 0-3; - for SCSI: 0-30; - for SATA: 0-5; - for VirtIO: 0-15; - for Unused: 0-255. + - 'For IDE: 0-3; for SCSI: 0-30; for SATA: 0-5; for VirtIO: 0-15; for Unused: 0-255.' 
type: str required: true state: description: - Indicates desired state of the disk. - - > - O(state=present) can be used to create, replace disk or update options in existing disk. It will create missing - disk or update options in existing one by default. See the O(create) parameter description to control behavior - of this option. + - O(state=present) can be used to create, replace disk or update options in existing disk. It will create missing disk or update options + in existing one by default. See the O(create) parameter description to control behavior of this option. - Some updates on options (like O(cache)) are not being applied instantly and require VM restart. - - > - Use O(state=detached) to detach existing disk from VM but do not remove it entirely. - When O(state=detached) and disk is V(unused[n]) it will be left in same state (not removed). - - > - O(state=moved) may be used to change backing storage for the disk in bounds of the same VM - or to send the disk to another VM (using the same backing storage). - - > - O(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size - because shrinking disks is not supported by the PVE API and has to be done manually. + - Use O(state=detached) to detach existing disk from VM but do not remove it entirely. When O(state=detached) and disk is V(unused[n]) it + will be left in same state (not removed). + - O(state=moved) may be used to change backing storage for the disk in bounds of the same VM or to send the disk to another VM (using the + same backing storage). + - O(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size because shrinking disks is not + supported by the PVE API and has to be done manually. - To entirely remove the disk from backing storage use O(state=absent). 
type: str choices: ['present', 'resized', 'detached', 'moved', 'absent'] @@ -84,10 +73,8 @@ options: size: description: - Desired volume size in GB to allocate when O(state=present) (specify O(size) without suffix). - - > - New (or additional) size of volume when O(state=resized). With the V(+) sign - the value is added to the actual size of the volume - and without it, the value is taken as an absolute one. + - New (or additional) size of volume when O(state=resized). With the V(+) sign the value is added to the actual size of the volume and without + it, the value is taken as an absolute one. type: str bwlimit: description: @@ -176,8 +163,8 @@ options: import_from: description: - Import volume from this existing one. - - Volume string format - - C(:/) or C(/) + - Volume string format. + - V(:/) or V(/). - Attention! Only root can use absolute paths. - This parameter is mutually exclusive with O(size). - Increase O(timeout) parameter when importing large disk images or using slow storage. @@ -223,7 +210,7 @@ options: type: int iothread: description: - - Whether to use iothreads for this drive (only for SCSI and VirtIO) + - Whether to use iothreads for this drive (only for SCSI and VirtIO). type: bool mbps: description: @@ -262,10 +249,9 @@ options: description: - The ISO image to be mounted on the specified in O(disk) CD-ROM. - O(media=cdrom) needs to be specified for this option to work. - - "Image string format:" - - V(:iso/) to mount ISO. - - V(cdrom) to use physical CD/DVD drive. - - V(none) to unmount image from existent CD-ROM or create empty CD-ROM drive. + - Use V(:iso/) to mount ISO. + - Use V(cdrom) to access the physical CD/DVD drive. + - Use V(none) to unmount image from existent CD-ROM or create empty CD-ROM drive. 
type: str version_added: 8.1.0 queues: @@ -330,9 +316,9 @@ extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new disk in VM (do not rewrite in case it exists already) community.general.proxmox_disk: api_host: node1 @@ -437,9 +423,9 @@ EXAMPLES = ''' media: cdrom iso_image: local:iso/favorite_distro_amd64.iso state: present -''' +""" -RETURN = ''' +RETURN = r""" vmid: description: The VM vmid. returned: success @@ -450,7 +436,7 @@ msg: returned: always type: str sample: "Disk scsi3 created in VM 101" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py index f3ff212bff..d9836da277 100644 --- a/plugins/modules/proxmox_domain_info.py +++ b/plugins/modules/proxmox_domain_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_domain_info short_description: Retrieve information about one or more Proxmox VE domains version_added: 1.3.0 @@ -31,10 +30,10 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List existing domains community.general.proxmox_domain_info: api_host: helldorado @@ -53,33 +52,33 @@ EXAMPLES = ''' api_token_secret: "{{ token_secret | default(omit) }}" domain: pve register: proxmox_domain_pve -''' +""" -RETURN = ''' +RETURN = r""" proxmox_domains: - description: List of authentication domains. 
- returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the realm. - returned: on success - type: str - realm: - description: Realm name. - returned: on success - type: str - type: - description: Realm type. - returned: on success - type: str - digest: - description: Realm hash. - returned: on success, can be absent - type: str -''' + description: List of authentication domains. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the realm. + returned: on success + type: str + realm: + description: Realm name. + returned: on success + type: str + type: + description: Realm type. + returned: on success + type: str + digest: + description: Realm hash. + returned: on success, can be absent + type: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py index eda1fe04d8..f62d467af8 100644 --- a/plugins/modules/proxmox_group_info.py +++ b/plugins/modules/proxmox_group_info.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_group_info short_description: Retrieve information about one or more Proxmox VE groups version_added: 1.3.0 description: - - Retrieve information about one or more Proxmox VE groups + - Retrieve information about one or more Proxmox VE groups. 
attributes: action_group: version_added: 9.0.0 @@ -31,10 +30,10 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List existing groups community.general.proxmox_group_info: api_host: helldorado @@ -53,30 +52,30 @@ EXAMPLES = ''' api_token_secret: "{{ token_secret | default(omit) }}" group: admin register: proxmox_group_admin -''' +""" -RETURN = ''' +RETURN = r""" proxmox_groups: - description: List of groups. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the group. - returned: on success, can be absent - type: str - groupid: - description: Group name. - returned: on success - type: str - users: - description: List of users in the group. - returned: on success - type: list - elements: str -''' + description: List of groups. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the group. + returned: on success, can be absent + type: str + groupid: + description: Group name. + returned: on success + type: str + users: + description: List of users in the group. + returned: on success + type: list + elements: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 0c9904873d..d495b08694 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_kvm short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster description: @@ -66,21 +65,21 @@ options: type: str bootdisk: description: - - 'Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+).' 
+ - Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+). type: str cicustom: description: - - 'cloud-init: Specify custom files to replace the automatically generated ones at start.' + - 'Cloud-init: Specify custom files to replace the automatically generated ones at start.' type: str version_added: 1.3.0 cipassword: description: - - 'cloud-init: password of default user to create.' + - 'Cloud-init: password of default user to create.' type: str version_added: 1.3.0 citype: description: - - 'cloud-init: Specifies the cloud-init configuration format.' + - 'Cloud-init: Specifies the cloud-init configuration format.' - The default depends on the configured operating system type (V(ostype)). - We use the V(nocloud) format for Linux, and V(configdrive2) for Windows. type: str @@ -88,12 +87,12 @@ options: version_added: 1.3.0 ciupgrade: description: - - 'cloud-init: do an automatic package upgrade after the first boot.' + - 'Cloud-init: do an automatic package upgrade after the first boot.' type: bool version_added: 10.0.0 ciuser: description: - - 'cloud-init: username of default user to create.' + - 'Cloud-init: username of default user to create.' type: str version_added: 1.3.0 clone: @@ -110,13 +109,13 @@ options: type: str cpulimit: description: - - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. - - If the computer has 2 CPUs, it has total of '2' CPU time + - Specify if CPU usage will be limited. Value V(0) indicates no CPU limit. + - If the computer has 2 CPUs, it has total of '2' CPU time. type: int cpuunits: description: - Specify CPU weight for a VM. - - You can disable fair-scheduler configuration by setting this to 0 + - You can disable fair-scheduler configuration by setting this to V(0). type: int delete: description: @@ -144,24 +143,22 @@ options: type: str format: description: - - V(format) is the drive's backing file's data format. 
Please refer to the Proxmox VE Administrator Guide, - section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest - version, tables 3 to 14) to find out format supported by the provided storage backend. + - V(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage + (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format supported by + the provided storage backend. type: str efitype: description: - V(efitype) indicates the size of the EFI disk. - V(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries. - - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable - Secure Boot + - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable Secure Boot. type: str choices: - 2m - 4m pre_enrolled_keys: description: - - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware - upon creation or not (0). + - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware upon creation or not (0). - If set to V(1), Secure Boot will also be enabled by default when the VM is created. type: bool version_added: 4.5.0 @@ -174,14 +171,13 @@ options: format: description: - Target drive's backing file's data format. - - Used only with clone + - Used only with clone. - Use O(format=unspecified) and O(full=false) for a linked clone. - - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see - U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format - supported by the provided storage backend. 
+ - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) + for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - Not specifying this option is equivalent to setting it to V(unspecified). type: str - choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ] + choices: ["cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified"] freeze: description: - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). @@ -190,7 +186,7 @@ options: description: - Create a full copy of all disk. This is always done when you clone a normal VM. - For VM templates, we try to create a linked clone by default. - - Used only with clone + - Used only with clone. type: bool default: true hookscript: @@ -202,11 +198,11 @@ options: description: - Specify a hash/dictionary of map host pci devices into guest. O(hostpci='{"key":"value", "key":"value"}'). - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). - - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). - - C(pcie=boolean) C(default=0) Choose the PCI-express bus (needs the q35 machine model). - - C(rombar=boolean) C(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. - - C(x-vga=boolean) C(default=0) Enable vfio-vga device support. + - Values allowed are - V("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). + - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is V(bus:dev.func) (hexadecimal numbers). + - V(pcie=boolean) V(default=0) Choose the PCI-express bus (needs the q35 machine model). 
+ - V(rombar=boolean) V(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. + - V(x-vga=boolean) V(default=0) Enable vfio-vga device support. - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. type: dict hotplug: @@ -223,21 +219,21 @@ options: ide: description: - A hash/dictionary of volume used as IDE hard disk or CD-ROM. O(ide='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + - Keys allowed are - V(ide[n]) where 0 ≤ n ≤ 3. + - Values allowed are - V("storage:size,format=value"). + - V(storage) is the storage identifier where to create the disk. + - V(size) is the size of the disk in GB. + - V(format) is the drive's backing file's data format. V(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section + Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format + supported by the provided storage backend. type: dict ipconfig: description: - - 'cloud-init: Set the IP configuration.' + - 'Cloud-init: Set the IP configuration.' - A hash/dictionary of network ip configurations. O(ipconfig='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces. - - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]"). 
- - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.' + - Keys allowed are - V(ipconfig[n]) where 0 ≤ n ≤ network interfaces. + - Values allowed are - V("[gw=] [,gw6=] [,ip=] [,ip6=]"). + - 'Cloud-init: Specify IP addresses and gateways for the corresponding interface.' - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration. @@ -265,7 +261,7 @@ options: machine: description: - Specifies the Qemu machine type. - - 'Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)).' + - Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)). type: str memory: description: @@ -294,7 +290,7 @@ options: type: str nameservers: description: - - 'cloud-init: DNS server IP address(es).' + - 'Cloud-init: DNS server IP address(es).' - If unset, PVE host settings are used. type: list elements: str @@ -307,7 +303,8 @@ options: - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. - - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. + - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes + per second'. - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. 
type: dict newid: @@ -361,23 +358,23 @@ options: description: - A hash/dictionary of volume used as sata hard disk or CD-ROM. O(sata='{"key":"value", "key":"value"}'). - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. - - Values allowed are - C("storage:size,format=value"). + - Values allowed are - C("storage:size,format=value"). - C(storage) is the storage identifier where to create the disk. - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section + Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format + supported by the provided storage backend. type: dict scsi: description: - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. O(scsi='{"key":"value", "key":"value"}'). - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13. - - Values allowed are - C("storage:size,format=value"). + - Values allowed are - C("storage:size,format=value"). - C(storage) is the storage identifier where to create the disk. - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). 
Please refer to the Proxmox VE Administrator Guide, section + Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format + supported by the provided storage backend. type: dict scsihw: description: @@ -386,7 +383,7 @@ options: choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi'] searchdomains: description: - - 'cloud-init: Sets DNS search domain(s).' + - 'Cloud-init: Sets DNS search domain(s).' - If unset, PVE host settings are used. type: list elements: str @@ -407,20 +404,20 @@ options: type: int skiplock: description: - - Ignore locks + - Ignore locks. - Only root is allowed to use this option. type: bool smbios: description: - Specifies SMBIOS type 1 fields. - - "Comma separated, Base64 encoded (optional) SMBIOS properties:" - - V([base64=<1|0>] [,family=]) - - V([,manufacturer=]) - - V([,product=]) - - V([,serial=]) - - V([,sku=]) - - V([,uuid=]) - - V([,version=]) + - Comma separated, Base64 encoded (optional) SMBIOS properties:. + - V([base64=<1|0>] [,family=]). + - V([,manufacturer=]). + - V([,product=]). + - V([,serial=]). + - V([,sku=]). + - V([,uuid=]). + - V([,version=]). type: str snapname: description: @@ -432,7 +429,7 @@ options: type: int sshkeys: description: - - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.' + - 'Cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.' type: str version_added: 1.3.0 startdate: @@ -449,7 +446,7 @@ options: state: description: - Indicates desired state of the instance. - - If V(current), the current state of the VM will be fetched. You can access it with C(results.status) + - If V(current), the current state of the VM will be fetched. You can access it with C(results.status). - V(template) was added in community.general 8.1.0. 
type: str choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current', 'template'] @@ -473,7 +470,7 @@ options: target: description: - Target node. Only allowed if the original VM is on shared storage. - - Used only with clone + - Used only with clone. type: str tdf: description: @@ -512,7 +509,7 @@ options: - A hash/dictionary of USB devices for the VM. O(usb='{"key":"value", "key":"value"}'). - Keys allowed are - C(usb[n]) where 0 ≤ n ≤ N. - Values allowed are - C(host="value|spice",mapping="value",usb3="1|0"). - - host is either C(spice) or the USB id/port. + - Host is either C(spice) or the USB id/port. - Option C(mapping) is the mapped USB device name. - Option C(usb3) enables USB 3 support. type: dict @@ -520,16 +517,16 @@ options: update: description: - If V(true), the VM will be updated with new value. - - Because of the operations of the API and security reasons, I have disabled the update of the following parameters - O(net), O(virtio), O(ide), O(sata), O(scsi). Per example updating O(net) update the MAC address and C(virtio) create always new disk... - This security feature can be disabled by setting the O(update_unsafe) to V(true). + - Because of the operations of the API and security reasons, I have disabled the update of the following parameters O(net), O(virtio), O(ide), + O(sata), O(scsi). Per example updating O(net) update the MAC address and O(virtio) create always new disk... This security feature can + be disabled by setting the O(update_unsafe) to V(true). - Update of O(pool) is disabled. It needs an additional API endpoint not covered by this module. type: bool default: false update_unsafe: description: - - If V(true), do not enforce limitations on parameters O(net), O(virtio), O(ide), O(sata), O(scsi), O(efidisk0), and O(tpmstate0). - Use this option with caution because an improper configuration might result in a permanent loss of data (e.g. disk recreated). 
+ - If V(true), do not enforce limitations on parameters O(net), O(virtio), O(ide), O(sata), O(scsi), O(efidisk0), and O(tpmstate0). Use this + option with caution because an improper configuration might result in a permanent loss of data (for example disk recreated). type: bool default: false version_added: 8.4.0 @@ -545,13 +542,13 @@ options: virtio: description: - A hash/dictionary of volume used as VIRTIO hard disk. O(virtio='{"key":"value", "key":"value"}'). - - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) - for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + - Keys allowed are - V(virtio[n]) where 0 ≤ n ≤ 15. + - Values allowed are - V("storage:size,format=value"). + - V(storage) is the storage identifier where to create the disk. + - V(size) is the size of the disk in GB. + - V(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section + Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format + supported by the provided storage backend. 
type: dict watchdog: description: @@ -564,9 +561,9 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.proxmox.selection - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new VM with minimal options community.general.proxmox_kvm: api_user: root@pam @@ -846,7 +843,7 @@ EXAMPLES = ''' cores: 8 memory: 16384 net: - net0: virtio,bridge=vmbr1 + net0: virtio,bridge=vmbr1 update: true update_unsafe: true @@ -886,10 +883,9 @@ EXAMPLES = ''' node: sabrewulf hookscript: local:snippets/hookscript.pl update: true +""" -''' - -RETURN = ''' +RETURN = r""" vmid: description: The VM vmid. returned: success @@ -901,11 +897,11 @@ status: type: str sample: running msg: - description: A short message + description: A short message. returned: always type: str sample: "VM kropta with vmid = 110 is running" -''' +""" import re import time diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py index 6e94ed0bb6..bcf23bc5a1 100644 --- a/plugins/modules/proxmox_nic.py +++ b/plugins/modules/proxmox_nic.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_nic short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster version_added: 3.1.0 @@ -52,8 +51,8 @@ options: description: - The NIC emulator model. 
type: str - choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'] + choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', + 'virtio', 'vmxnet3'] default: virtio mtu: description: @@ -99,9 +98,9 @@ extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create NIC net0 targeting the vm by name community.general.proxmox_nic: api_user: root@pam @@ -131,20 +130,20 @@ EXAMPLES = ''' name: my_vm interface: net0 state: absent -''' +""" -RETURN = ''' +RETURN = r""" vmid: description: The VM vmid. returned: success type: int sample: 115 msg: - description: A short message + description: A short message. returned: always type: str sample: "Nic net0 unchanged on VM with vmid 103" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) diff --git a/plugins/modules/proxmox_node_info.py b/plugins/modules/proxmox_node_info.py index 51d8745c05..e243862134 100644 --- a/plugins/modules/proxmox_node_info.py +++ b/plugins/modules/proxmox_node_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_node_info short_description: Retrieve information about one or more Proxmox VE nodes version_added: 8.2.0 @@ -25,10 +24,10 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List existing nodes community.general.proxmox_node_info: api_host: proxmox1 
@@ -37,69 +36,69 @@ EXAMPLES = ''' api_token_id: "{{ token_id | default(omit) }}" api_token_secret: "{{ token_secret | default(omit) }}" register: proxmox_nodes -''' +""" -RETURN = ''' +RETURN = r""" proxmox_nodes: - description: List of Proxmox VE nodes. - returned: always, but can be empty - type: list - elements: dict - contains: - cpu: - description: Current CPU usage in fractional shares of this host's total available CPU. - returned: on success - type: float - disk: - description: Current local disk usage of this host. - returned: on success - type: int - id: - description: Identity of the node. - returned: on success - type: str - level: - description: Support level. Can be blank if not under a paid support contract. - returned: on success - type: str - maxcpu: - description: Total number of available CPUs on this host. - returned: on success - type: int - maxdisk: - description: Size of local disk in bytes. - returned: on success - type: int - maxmem: - description: Memory size in bytes. - returned: on success - type: int - mem: - description: Used memory in bytes. - returned: on success - type: int - node: - description: Short hostname of this node. - returned: on success - type: str - ssl_fingerprint: - description: SSL fingerprint of the node certificate. - returned: on success - type: str - status: - description: Node status. - returned: on success - type: str - type: - description: Object type being returned. - returned: on success - type: str - uptime: - description: Node uptime in seconds. - returned: on success - type: int -''' + description: List of Proxmox VE nodes. + returned: always, but can be empty + type: list + elements: dict + contains: + cpu: + description: Current CPU usage in fractional shares of this host's total available CPU. + returned: on success + type: float + disk: + description: Current local disk usage of this host. + returned: on success + type: int + id: + description: Identity of the node. 
+ returned: on success + type: str + level: + description: Support level. Can be blank if not under a paid support contract. + returned: on success + type: str + maxcpu: + description: Total number of available CPUs on this host. + returned: on success + type: int + maxdisk: + description: Size of local disk in bytes. + returned: on success + type: int + maxmem: + description: Memory size in bytes. + returned: on success + type: int + mem: + description: Used memory in bytes. + returned: on success + type: int + node: + description: Short hostname of this node. + returned: on success + type: str + ssl_fingerprint: + description: SSL fingerprint of the node certificate. + returned: on success + type: str + status: + description: Node status. + returned: on success + type: str + type: + description: Object type being returned. + returned: on success + type: str + uptime: + description: Node uptime in seconds. + returned: on success + type: int +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/proxmox_pool.py b/plugins/modules/proxmox_pool.py index 5089ec3bef..c53e394eeb 100644 --- a/plugins/modules/proxmox_pool.py +++ b/plugins/modules/proxmox_pool.py @@ -8,7 +8,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r""" ---- module: proxmox_pool short_description: Pool management for Proxmox VE cluster description: @@ -28,12 +27,12 @@ options: description: - The pool ID. type: str - aliases: [ "name" ] + aliases: ["name"] required: true state: description: - - Indicate desired state of the pool. - - The pool must be empty prior deleting it with O(state=absent). + - Indicate desired state of the pool. + - The pool must be empty prior deleting it with O(state=absent). 
choices: ['present', 'absent'] default: present type: str @@ -49,7 +48,7 @@ extends_documentation_fragment: - community.general.attributes """ -EXAMPLES = """ +EXAMPLES = r""" - name: Create new Proxmox VE pool community.general.proxmox_pool: api_host: node1 @@ -67,7 +66,7 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" poolid: description: The pool ID. returned: success diff --git a/plugins/modules/proxmox_pool_member.py b/plugins/modules/proxmox_pool_member.py index b26082f975..bd32e94e42 100644 --- a/plugins/modules/proxmox_pool_member.py +++ b/plugins/modules/proxmox_pool_member.py @@ -8,7 +8,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r""" ---- module: proxmox_pool_member short_description: Add or delete members from Proxmox VE cluster pools description: @@ -27,7 +26,7 @@ options: description: - The pool ID. type: str - aliases: [ "name" ] + aliases: ["name"] required: true member: description: @@ -44,7 +43,7 @@ options: type: str state: description: - - Indicate desired state of the pool member. + - Indicate desired state of the pool member. choices: ['present', 'absent'] default: present type: str @@ -55,7 +54,7 @@ extends_documentation_fragment: - community.general.attributes """ -EXAMPLES = """ +EXAMPLES = r""" - name: Add new VM to Proxmox VE pool community.general.proxmox_pool_member: api_host: node1 @@ -93,7 +92,7 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" poolid: description: The pool ID. 
returned: success diff --git a/plugins/modules/proxmox_snap.py b/plugins/modules/proxmox_snap.py index 4f7b345b80..57dad92413 100644 --- a/plugins/modules/proxmox_snap.py +++ b/plugins/modules/proxmox_snap.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_snap short_description: Snapshot management of instances in Proxmox VE cluster version_added: 2.0.0 @@ -35,8 +34,8 @@ options: type: str state: description: - - Indicate desired state of the instance snapshot. - - The V(rollback) value was added in community.general 4.8.0. + - Indicate desired state of the instance snapshot. + - The V(rollback) value was added in community.general 4.8.0. choices: ['present', 'absent', 'rollback'] default: present type: str @@ -51,7 +50,8 @@ options: - Allows to snapshot a container even if it has configured mountpoints. - Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration. - If running, the container will be stopped and restarted to apply config changes. - - Due to restrictions in the Proxmox API this option can only be used authenticating as V(root@pam) with O(api_password), API tokens do not work either. + - Due to restrictions in the Proxmox API this option can only be used authenticating as V(root@pam) with O(api_password), API tokens do + not work either. - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details. default: false type: bool @@ -80,23 +80,23 @@ options: description: - Remove old snapshots if there are more than O(retention) snapshots. - If O(retention) is set to V(0), all snapshots will be kept. - - This is only used when O(state=present) and when an actual snapshot is created. - If no snapshot is created, all existing snapshots will be kept. + - This is only used when O(state=present) and when an actual snapshot is created. 
If no snapshot is created, all existing snapshots will + be kept. default: 0 type: int version_added: 7.1.0 notes: - Requires proxmoxer and requests modules on host. These modules can be installed with pip. -requirements: [ "proxmoxer", "requests" ] +requirements: ["proxmoxer", "requests"] author: Jeffrey van Pelt (@Thulium-Drake) extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create new container snapshot community.general.proxmox_snap: api_user: root@pam @@ -143,9 +143,9 @@ EXAMPLES = r''' vmid: 100 state: rollback snapname: pre-updates -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" import time diff --git a/plugins/modules/proxmox_storage_contents_info.py b/plugins/modules/proxmox_storage_contents_info.py index b777870e54..e0e95565d7 100644 --- a/plugins/modules/proxmox_storage_contents_info.py +++ b/plugins/modules/proxmox_storage_contents_info.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: proxmox_storage_contents_info short_description: List content from a Proxmox VE storage version_added: 8.2.0 @@ -51,7 +50,7 @@ extends_documentation_fragment: """ -EXAMPLES = """ +EXAMPLES = r""" - name: List existing storages community.general.proxmox_storage_contents_info: api_host: helldorado @@ -65,7 +64,7 @@ EXAMPLES = """ """ -RETURN = """ +RETURN = r""" proxmox_storage_content: description: Content of of storage attached to a node. 
type: list diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py index fd5a6ee0d8..5b9b1b6aaa 100644 --- a/plugins/modules/proxmox_storage_info.py +++ b/plugins/modules/proxmox_storage_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_storage_info short_description: Retrieve information about one or more Proxmox VE storages version_added: 2.2.0 @@ -37,10 +36,10 @@ extends_documentation_fragment: - community.general.attributes.info_module notes: - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List existing storages community.general.proxmox_storage_info: api_host: helldorado @@ -69,10 +68,10 @@ EXAMPLES = ''' api_token_secret: "{{ token_secret | default(omit) }}" storage: lvm2 register: proxmox_storage_lvm -''' +""" -RETURN = ''' +RETURN = r""" proxmox_storages: description: List of storage pools. returned: on success @@ -80,41 +79,41 @@ proxmox_storages: elements: dict contains: content: - description: Proxmox content types available in this storage + description: Proxmox content types available in this storage. returned: on success type: list elements: str digest: - description: Storage's digest + description: Storage's digest. returned: on success type: str nodes: - description: List of nodes associated to this storage + description: List of nodes associated to this storage. returned: on success, if storage is not local type: list elements: str path: - description: Physical path to this storage + description: Physical path to this storage. returned: on success type: str prune-backups: - description: Backup retention options + description: Backup retention options. 
returned: on success type: list elements: dict shared: - description: Is this storage shared + description: Is this storage shared. returned: on success type: bool storage: - description: Storage name + description: Storage name. returned: on success type: str type: - description: Storage type + description: Storage type. returned: on success type: str -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py index 65a07566a8..574a971427 100644 --- a/plugins/modules/proxmox_tasks_info.py +++ b/plugins/modules/proxmox_tasks_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: proxmox_tasks_info short_description: Retrieve information about one or more Proxmox VE tasks version_added: 3.8.0 @@ -36,10 +35,10 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List tasks on node01 community.general.proxmox_tasks_info: api_host: proxmoxhost @@ -60,66 +59,66 @@ EXAMPLES = ''' task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:' node: node01 register: proxmox_tasks -''' +""" -RETURN = ''' +RETURN = r""" proxmox_tasks: - description: List of tasks. - returned: on success - type: list - elements: dict - contains: - id: - description: ID of the task. - returned: on success - type: str - node: - description: Node name. - returned: on success - type: str - pid: - description: PID of the task. - returned: on success - type: int - pstart: - description: pastart of the task. - returned: on success - type: int - starttime: - description: Starting time of the task. - returned: on success - type: int - type: - description: Type of the task. 
- returned: on success - type: str - upid: - description: UPID of the task. - returned: on success - type: str - user: - description: User that owns the task. - returned: on success - type: str - endtime: - description: Endtime of the task. - returned: on success, can be absent - type: int - status: - description: Status of the task. - returned: on success, can be absent - type: str - failed: - description: If the task failed. - returned: when status is defined - type: bool + description: List of tasks. + returned: on success + type: list + elements: dict + contains: + id: + description: ID of the task. + returned: on success + type: str + node: + description: Node name. + returned: on success + type: str + pid: + description: PID of the task. + returned: on success + type: int + pstart: + description: Pastart of the task. + returned: on success + type: int + starttime: + description: Starting time of the task. + returned: on success + type: int + type: + description: Type of the task. + returned: on success + type: str + upid: + description: UPID of the task. + returned: on success + type: str + user: + description: User that owns the task. + returned: on success + type: str + endtime: + description: Endtime of the task. + returned: on success, can be absent + type: int + status: + description: Status of the task. + returned: on success, can be absent + type: str + failed: + description: If the task failed. + returned: when status is defined + type: bool msg: - description: Short message. - returned: on failure - type: str - sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' -''' + description: Short message. 
+ returned: on failure + type: str + sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.proxmox import ( diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py index 876e8a6847..c994eff1f8 100644 --- a/plugins/modules/proxmox_template.py +++ b/plugins/modules/proxmox_template.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_template short_description: Management of OS templates in Proxmox VE cluster description: - - allows you to upload/delete templates in Proxmox VE cluster + - Allows you to upload/delete templates in Proxmox VE cluster. attributes: check_mode: support: none @@ -34,7 +33,7 @@ options: type: path url: description: - - URL to file to download + - URL to file to download. - Exactly one of O(src) or O(url) is required for O(state=present). type: str version_added: 10.1.0 @@ -68,7 +67,7 @@ options: default: false state: description: - - Indicate desired state of the template. + - Indicate desired state of the template. 
type: str choices: ['present', 'absent'] default: present @@ -80,9 +79,9 @@ extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Upload new openvz template with minimal options community.general.proxmox_template: node: uk-mc02 @@ -148,7 +147,7 @@ EXAMPLES = ''' storage: local content_type: vztmpl template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz -''' +""" import os import time diff --git a/plugins/modules/proxmox_user_info.py b/plugins/modules/proxmox_user_info.py index 8680dec7ca..a8da1ee30a 100644 --- a/plugins/modules/proxmox_user_info.py +++ b/plugins/modules/proxmox_user_info.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: proxmox_user_info short_description: Retrieve information about one or more Proxmox VE users version_added: 1.3.0 description: - - Retrieve information about one or more Proxmox VE users + - Retrieve information about one or more Proxmox VE users. attributes: action_group: version_added: 9.0.0 @@ -40,9 +39,9 @@ extends_documentation_fragment: - community.general.proxmox.documentation - community.general.attributes - community.general.attributes.info_module -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List existing users community.general.proxmox_user_info: api_host: helldorado @@ -82,84 +81,84 @@ EXAMPLES = ''' user: admin domain: pve register: proxmox_user_admin -''' +""" -RETURN = ''' +RETURN = r""" proxmox_users: - description: List of users. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the user. - returned: on success - type: str - domain: - description: User's authentication realm, also the right part of the user ID. 
- returned: on success - type: str - email: - description: User's email address. - returned: on success - type: str - enabled: - description: User's account state. - returned: on success - type: bool - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - firstname: - description: User's first name. - returned: on success - type: str - groups: - description: List of groups which the user is a member of. - returned: on success - type: list - elements: str - keys: - description: User's two factor authentication keys. - returned: on success - type: str - lastname: - description: User's last name. - returned: on success - type: str - tokens: - description: List of API tokens associated to the user. - returned: on success - type: list - elements: dict - contains: - comment: - description: Short description of the token. - returned: on success - type: str - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - privsep: - description: Describe if the API token is further restricted with ACLs or is fully privileged. - returned: on success - type: bool - tokenid: - description: Token name. - returned: on success - type: str - user: - description: User's login name, also the left part of the user ID. - returned: on success - type: str - userid: - description: Proxmox user ID, represented as user@realm. - returned: on success - type: str -''' + description: List of users. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the user. + returned: on success + type: str + domain: + description: User's authentication realm, also the right part of the user ID. + returned: on success + type: str + email: + description: User's email address. + returned: on success + type: str + enabled: + description: User's account state. 
+ returned: on success + type: bool + expire: + description: Expiration date in seconds since EPOCH. Zero means no expiration. + returned: on success + type: int + firstname: + description: User's first name. + returned: on success + type: str + groups: + description: List of groups which the user is a member of. + returned: on success + type: list + elements: str + keys: + description: User's two factor authentication keys. + returned: on success + type: str + lastname: + description: User's last name. + returned: on success + type: str + tokens: + description: List of API tokens associated to the user. + returned: on success + type: list + elements: dict + contains: + comment: + description: Short description of the token. + returned: on success + type: str + expire: + description: Expiration date in seconds since EPOCH. Zero means no expiration. + returned: on success + type: int + privsep: + description: Describe if the API token is further restricted with ACLs or is fully privileged. + returned: on success + type: bool + tokenid: + description: Token name. + returned: on success + type: str + user: + description: User's login name, also the left part of the user ID. + returned: on success + type: str + userid: + description: Proxmox user ID, represented as user@realm. 
+ returned: on success + type: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/proxmox_vm_info.py b/plugins/modules/proxmox_vm_info.py index e10b9dff6f..36ddea9db8 100644 --- a/plugins/modules/proxmox_vm_info.py +++ b/plugins/modules/proxmox_vm_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: proxmox_vm_info short_description: Retrieve information about one or more Proxmox VE virtual machines version_added: 7.2.0 @@ -71,7 +70,7 @@ extends_documentation_fragment: - community.general.attributes.info_module """ -EXAMPLES = """ +EXAMPLES = r""" - name: List all existing virtual machines on node community.general.proxmox_vm_info: api_host: proxmoxhost @@ -108,7 +107,7 @@ EXAMPLES = """ config: current """ -RETURN = """ +RETURN = r""" proxmox_vms: description: List of virtual machines. returned: on success diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py index 34098873a1..598b6b5af3 100644 --- a/plugins/modules/pubnub_blocks.py +++ b/plugins/modules/pubnub_blocks.py @@ -14,15 +14,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pubnub_blocks short_description: PubNub blocks management module description: - - "This module allows Ansible to interface with the PubNub BLOCKS - infrastructure by providing the following operations: create / remove, - start / stop and rename for blocks and create / modify / remove for event - handlers." + - 'This module allows Ansible to interface with the PubNub BLOCKS infrastructure by providing the following operations: create / remove, start + / stop and rename for blocks and create / modify / remove for event handlers.' 
author: - PubNub (@pubnub) - Sergey Mamontov (@parfeon) @@ -39,38 +36,33 @@ options: email: description: - Email from account for which new session should be started. - - "Not required if O(cache) contains result of previous module call (in - same play)." + - Not required if O(cache) contains result of previous module call (in same play). required: false type: str default: '' password: description: - Password which match to account to which specified O(email) belong. - - "Not required if O(cache) contains result of previous module call (in - same play)." + - Not required if O(cache) contains result of previous module call (in same play). required: false type: str default: '' cache: - description: > - In case if single play use blocks management module few times it is - preferred to enabled 'caching' by making previous module to share - gathered artifacts and pass them to this parameter. + description: >- + In case if single play use blocks management module few times it is preferred to enabled 'caching' by making previous module to share gathered + artifacts and pass them to this parameter. required: false type: dict default: {} account: description: - - "Name of PubNub account for from which O(application) will be used to - manage blocks." - - "User's account will be used if value not set or empty." + - Name of PubNub account for from which O(application) will be used to manage blocks. + - User's account will be used if value not set or empty. type: str default: '' application: description: - - "Name of target PubNub application for which blocks configuration on - specific O(keyset) will be done." + - Name of target PubNub application for which blocks configuration on specific O(keyset) will be done. type: str required: true keyset: @@ -80,8 +72,7 @@ options: required: true state: description: - - "Intended block state after event handlers creation / update process - will be completed." 
+ - Intended block state after event handlers creation / update process will be completed. required: false default: 'present' choices: ['started', 'stopped', 'present', 'absent'] @@ -93,55 +84,45 @@ options: type: str description: description: - - Short block description which will be later visible on - admin.pubnub.com. Used only if block doesn't exists and won't change - description for existing block. + - Short block description which will be later visible on admin.pubnub.com. Used only if block doesn't exists and won't change description + for existing block. required: false type: str event_handlers: description: - - "List of event handlers which should be updated for specified block - O(name)." - - "Each entry for new event handler should contain: C(name), C(src), - C(channels), C(event). C(name) used as event handler name which can be - used later to make changes to it." + - List of event handlers which should be updated for specified block O(name). + - 'Each entry for new event handler should contain: V(name), V(src), V(channels), V(event). V(name) used as event handler name which can + be used later to make changes to it.' - C(src) is full path to file with event handler code. - - "C(channels) is name of channel from which event handler is waiting - for events." - - "C(event) is type of event which is able to trigger event handler: - C(js-before-publish), C(js-after-publish), C(js-after-presence)." - - "Each entry for existing handlers should contain C(name) (so target - handler can be identified). Rest parameters (C(src), C(channels) and - C(event)) can be added if changes required for them." - - "It is possible to rename event handler by adding C(changes) key to - event handler payload and pass dictionary, which will contain single key - C(name), where new name should be passed." - - "To remove particular event handler it is possible to set C(state) for - it to C(absent) and it will be removed." 
+ - V(channels) is name of channel from which event handler is waiting for events. + - 'V(event) is type of event which is able to trigger event handler: V(js-before-publish), V(js-after-publish), V(js-after-presence).' + - Each entry for existing handlers should contain C(name) (so target handler can be identified). Rest parameters (C(src), C(channels) and + C(event)) can be added if changes required for them. + - It is possible to rename event handler by adding C(changes) key to event handler payload and pass dictionary, which will contain single + key C(name), where new name should be passed. + - To remove particular event handler it is possible to set C(state) for it to C(absent) and it will be removed. required: false default: [] type: list elements: dict changes: description: - - "List of fields which should be changed by block itself (doesn't - affect any event handlers)." - - "Possible options for change is: O(name)." + - List of fields which should be changed by block itself (does not affect any event handlers). + - 'Possible options for change is: O(name).' required: false default: {} type: dict validate_certs: description: - - "This key allow to try skip certificates check when performing REST API - calls. Sometimes host may have issues with certificates on it and this - will cause problems to call PubNub REST API." + - This key allow to try skip certificates check when performing REST API calls. Sometimes host may have issues with certificates on it and + this will cause problems to call PubNub REST API. - If check should be ignored V(false) should be passed to this parameter. required: false default: true type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Event handler create example. 
- name: Create single event handler community.general.pubnub_blocks: @@ -151,8 +132,7 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' event_handlers: - - - src: '{{ path_to_handler_source }}' + - src: '{{ path_to_handler_source }}' name: '{{ handler_name }}' event: 'js-before-publish' channels: '{{ handler_channel }}' @@ -166,8 +146,7 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' event_handlers: - - - name: '{{ handler_name }}' + - name: '{{ handler_name }}' event: 'js-after-publish' # Stop block and event handlers. @@ -199,8 +178,7 @@ EXAMPLES = ''' name: '{{ block_name }}' state: present event_handlers: - - - src: '{{ path_to_handler_1_source }}' + - src: '{{ path_to_handler_1_source }}' name: '{{ event_handler_1_name }}' channels: '{{ event_handler_1_channel }}' event: 'js-before-publish' @@ -213,8 +191,7 @@ EXAMPLES = ''' name: '{{ block_name }}' state: present event_handlers: - - - src: '{{ path_to_handler_2_source }}' + - src: '{{ path_to_handler_2_source }}' name: '{{ event_handler_2_name }}' channels: '{{ event_handler_2_channel }}' event: 'js-before-publish' @@ -226,17 +203,16 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' state: started -''' +""" -RETURN = ''' +RETURN = r""" module_cache: description: - - Cached account information. In case if with single play module - used few times it is better to pass cached data to next module calls to speed + - Cached account information. In case if with single play module used few times it is better to pass cached data to next module calls to speed up process. 
type: dict returned: always -''' +""" import copy import os diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py index c581fa3187..142c5f66f4 100644 --- a/plugins/modules/pulp_repo.py +++ b/plugins/modules/pulp_repo.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pulp_repo author: "Joe Adams (@sysadmind)" short_description: Add or remove Pulp repos from a remote host @@ -35,78 +34,68 @@ options: type: str force_basic_auth: description: - - httplib2, the library used by the M(ansible.builtin.uri) module only sends - authentication information when a webservice responds to an initial - request with a 401 status. Since some basic auth services do not - properly send a 401, logins will fail. This option forces the sending of - the Basic authentication header upon initial request. + - C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice responds to an + initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This option forces the + sending of the Basic authentication header upon initial request. type: bool default: false generate_sqlite: description: - - Boolean flag to indicate whether sqlite files should be generated during - a repository publish. + - Boolean flag to indicate whether sqlite files should be generated during a repository publish. required: false type: bool default: false feed_ca_cert: description: - - CA certificate string used to validate the feed source SSL certificate. - This can be the file content or the path to the file. + - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to the file. 
type: str - aliases: [ importer_ssl_ca_cert ] + aliases: [importer_ssl_ca_cert] feed_client_cert: description: - - Certificate used as the client certificate when synchronizing the - repository. This is used to communicate authentication information to - the feed source. The value to this option must be the full path to the - certificate. The specified file may be the certificate itself or a - single file containing both the certificate and private key. This can be - the file content or the path to the file. + - Certificate used as the client certificate when synchronizing the repository. This is used to communicate authentication information to + the feed source. The value to this option must be the full path to the certificate. The specified file may be the certificate itself or + a single file containing both the certificate and private key. This can be the file content or the path to the file. type: str - aliases: [ importer_ssl_client_cert ] + aliases: [importer_ssl_client_cert] feed_client_key: description: - - Private key to the certificate specified in O(feed_client_cert), - assuming it is not included in the certificate file itself. This can be - the file content or the path to the file. + - Private key to the certificate specified in O(feed_client_cert), assuming it is not included in the certificate file itself. This can + be the file content or the path to the file. type: str - aliases: [ importer_ssl_client_key ] + aliases: [importer_ssl_client_key] name: description: - Name of the repo to add or remove. This correlates to repo-id in Pulp. required: true type: str - aliases: [ repo ] + aliases: [repo] proxy_host: description: - - Proxy url setting for the pulp repository importer. This is in the - format scheme://host. + - Proxy url setting for the pulp repository importer. This is in the format scheme://host. required: false - default: null + default: type: str proxy_port: description: - Proxy port setting for the pulp repository importer. 
required: false - default: null + default: type: str proxy_username: description: - Proxy username for the pulp repository importer. required: false - default: null + default: type: str proxy_password: description: - Proxy password for the pulp repository importer. required: false - default: null + default: type: str publish_distributor: description: - - Distributor to use when O(state=publish). The default is to - publish all distributors. + - Distributor to use when O(state=publish). The default is to publish all distributors. type: str pulp_host: description: @@ -124,8 +113,7 @@ options: type: str repoview: description: - - Whether to generate repoview files for a published repository. Setting - this to V(true) automatically activates O(generate_sqlite). + - Whether to generate repoview files for a published repository. Setting this to V(true) automatically activates O(generate_sqlite). required: false type: bool default: false @@ -141,24 +129,21 @@ options: default: true state: description: - - The repo state. A state of V(sync) will queue a sync of the repo. - This is asynchronous but not delayed like a scheduled sync. A state of - V(publish) will use the repository's distributor to publish the content. + - The repo state. A state of V(sync) will queue a sync of the repo. This is asynchronous but not delayed like a scheduled sync. A state + of V(publish) will use the repository's distributor to publish the content. default: present - choices: [ "present", "absent", "sync", "publish" ] + choices: ["present", "absent", "sync", "publish"] type: str url_password: description: - - The password for use in HTTP basic authentication to the pulp API. - If the O(url_username) parameter is not specified, the O(url_password) + - The password for use in HTTP basic authentication to the pulp API. If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used. 
url_username: description: - The username for use in HTTP basic authentication to the pulp API. validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be - used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. type: bool default: true wait_for_completion: @@ -167,14 +152,13 @@ options: type: bool default: false notes: - - This module can currently only create distributors and importers on rpm - repositories. Contributions to support other repo types are welcome. + - This module can currently only create distributors and importers on rpm repositories. Contributions to support other repo types are welcome. extends_documentation_fragment: - ansible.builtin.url - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new repo with name 'my_repo' community.general.pulp_repo: name: my_repo @@ -197,15 +181,15 @@ EXAMPLES = ''' name: my_old_repo repo_type: rpm state: absent -''' +""" -RETURN = ''' +RETURN = r""" repo: description: Name of the repo that the action was performed on. returned: success type: str sample: my_repo -''' +""" import json import os diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index 46326c667f..cf4cfae47c 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: puppet short_description: Runs puppet description: @@ -66,11 +65,11 @@ options: version_added: 5.1.0 logdest: description: - - Where the puppet logs should go, if puppet apply is being used. - - V(all) will go to both C(console) and C(syslog). - - V(stdout) will be deprecated and replaced by C(console). 
+ - Where the puppet logs should go, if puppet apply is being used. + - V(all) will go to both C(console) and C(syslog). + - V(stdout) will be deprecated and replaced by C(console). type: str - choices: [ all, stdout, syslog ] + choices: [all, stdout, syslog] default: stdout certname: description: @@ -94,7 +93,7 @@ options: type: str use_srv_records: description: - - Toggles use_srv_records flag + - Toggles use_srv_records flag. type: bool summarize: description: @@ -103,8 +102,8 @@ options: default: false waitforlock: description: - - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. - - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes and V(h) for hours. + - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. + - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes and V(h) for hours. type: str version_added: 9.0.0 verbose: @@ -119,27 +118,27 @@ options: default: false show_diff: description: - - Whether to print file changes details + - Whether to print file changes details. type: bool default: false environment_lang: description: - The lang environment to use when running the puppet agent. - - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output - - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure - the selected locale is supported on the system the puppet agent runs on. - - Starting with community.general 9.1.0, you can use the value V(auto) and the module will - try and determine the best parseable locale to use. + - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output. 
+ - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the selected locale is + supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module will try and determine the best parseable locale to + use. type: str default: C version_added: 8.6.0 requirements: -- puppet + - puppet author: -- Monty Taylor (@emonty) -''' + - Monty Taylor (@emonty) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Run puppet agent and fail if anything goes wrong community.general.puppet: @@ -162,10 +161,10 @@ EXAMPLES = r''' - name: Run puppet using a specific tags community.general.puppet: tags: - - update - - nginx + - update + - nginx skip_tags: - - service + - service - name: Wait 30 seconds for any current puppet runs to finish community.general.puppet: @@ -184,7 +183,7 @@ EXAMPLES = r''' modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules logdest: all manifest: /var/lib/example/puppet_step_config.pp -''' +""" import json import os diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py index 673f30cc36..32a659922a 100644 --- a/plugins/modules/pushbullet.py +++ b/plugins/modules/pushbullet.py @@ -9,65 +9,59 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: "Willy Barro (@willybarro)" -requirements: [ pushbullet.py ] +requirements: [pushbullet.py] module: pushbullet short_description: Sends notifications to Pushbullet description: - - This module sends push notifications via Pushbullet to channels or devices. + - This module sends push notifications through Pushbullet to channels or devices. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - api_key: - type: str - description: - - Push bullet API token - required: true - channel: - type: str - description: - - The channel TAG you wish to broadcast a push notification, - as seen on the "My Channels" > "Edit your channel" at - Pushbullet page. - device: - type: str - description: - - The device NAME you wish to send a push notification, - as seen on the Pushbullet main page. - push_type: - type: str - description: - - Thing you wish to push. - default: note - choices: [ "note", "link" ] - title: - type: str - description: - - Title of the notification. - required: true - body: - type: str - description: - - Body of the notification, e.g. Details of the fault you're alerting. - url: - type: str - description: - - URL field, used when O(push_type=link). - + api_key: + type: str + description: + - Push bullet API token. + required: true + channel: + type: str + description: + - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet page. + device: + type: str + description: + - The device NAME you wish to send a push notification, as seen on the Pushbullet main page. + push_type: + type: str + description: + - Thing you wish to push. + default: note + choices: ["note", "link"] + title: + type: str + description: + - Title of the notification. + required: true + body: + type: str + description: + - Body of the notification, for example details of the fault you are alerting. + url: + type: str + description: + - URL field, used when O(push_type=link). notes: - - Requires pushbullet.py Python package on the remote host. - You can install it via pip with ($ pip install pushbullet.py). 
- See U(https://github.com/randomchars/pushbullet.py) -''' + - Requires C(pushbullet.py) Python package on the remote host. You can install it through C(pip) with C(pip install pushbullet.py). + - See U(https://github.com/randomchars/pushbullet.py). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Sends a push notification to a device community.general.pushbullet: api_key: "ABC123abc123ABC123abc123ABC123ab" @@ -94,7 +88,7 @@ EXAMPLES = ''' channel: my-awesome-channel title: ALERT! Signup service is down body: Error rate on signup service is over 90% for more than 2 minutes -''' +""" import traceback diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py index f5493731fa..ae57411531 100644 --- a/plugins/modules/pushover.py +++ b/plugins/modules/pushover.py @@ -9,16 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pushover -short_description: Send notifications via U(https://pushover.net) +short_description: Send notifications through U(https://pushover.net) description: - - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. + - Send notifications through pushover to subscriber list of devices and email addresses. Requires pushover app on devices. notes: - - You will require a pushover.net account to use this module. But no account - is required to receive messages. + - You will require a pushover.net account to use this module. But no account is required to receive messages. extends_documentation_fragment: - community.general.attributes attributes: @@ -53,7 +50,7 @@ options: - Message priority (see U(https://pushover.net) for details). 
required: false default: '0' - choices: [ '-2', '-1', '0', '1', '2' ] + choices: ['-2', '-1', '0', '1', '2'] device: type: str description: @@ -64,9 +61,9 @@ options: author: - "Jim Richardson (@weaselkeeper)" - "Bernd Arnold (@wopfel)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send notifications via pushover.net community.general.pushover: msg: '{{ inventory_hostname }} is acting strange ...' @@ -90,7 +87,7 @@ EXAMPLES = ''' user_key: baa5fe97f2c5ab3ca8f0bb59 device: admins-iPhone delegate_to: localhost -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py index 8e709440d1..17432583e3 100644 --- a/plugins/modules/python_requirements_info.py +++ b/plugins/modules/python_requirements_info.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: python_requirements_info short_description: Show python path and assert dependency versions description: @@ -19,18 +19,17 @@ options: dependencies: type: list elements: str - description: > - A list of version-likes or module names to check for installation. - Supported operators: <, >, <=, >=, or ==. The bare module name like - V(ansible), the module with a specific version like V(boto3==1.6.1), or a - partial version like V(requests>2) are all valid specifications. + description: + - 'A list of version-likes or module names to check for installation. Supported operators: C(<), C(>), C(<=), C(>=), or C(==).' + - The bare module name like V(ansible), the module with a specific version like V(boto3==1.6.1), + or a partial version like V(requests>2) are all valid specifications. 
default: [] author: - Will Thames (@willthames) - Ryan Scott Brown (@ryansb) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Show python lib/site paths community.general.python_requirements_info: @@ -39,21 +38,21 @@ EXAMPLES = ''' dependencies: - boto3>1.6 - botocore<2 -''' +""" -RETURN = ''' +RETURN = r""" python: - description: path to python version used + description: Path to the Python interpreter used. returned: always type: str sample: /usr/local/opt/python@2/bin/python2.7 python_version: - description: version of python + description: Version of Python. returned: always type: str sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]" python_version_info: - description: breakdown version of python + description: Breakdown version of Python. returned: always type: dict contains: @@ -84,25 +83,26 @@ python_version_info: sample: 0 version_added: 4.2.0 python_system_path: - description: List of paths python is looking for modules in + description: List of paths Python is looking for modules in. returned: always type: list sample: - /usr/local/opt/python@2/site-packages/ - /usr/lib/python/site-packages/ valid: - description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) will be null + description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) will be + null. returned: always type: dict sample: boto3: - desired: null + desired: installed: 1.7.60 botocore: desired: botocore<2 installed: 1.10.60 mismatched: - description: A dictionary of dependencies that did not satisfy the desired version + description: A dictionary of dependencies that did not satisfy the desired version. 
returned: always type: dict sample: @@ -110,13 +110,13 @@ mismatched: desired: botocore>2 installed: 1.10.60 not_found: - description: A list of packages that could not be imported at all, and are not installed + description: A list of packages that could not be imported at all, and are not installed. returned: always type: list sample: - boto4 - requests -''' +""" import re import sys From cea6eeef371d6751220679459ed9ee77c157ffa3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 26 Dec 2024 21:12:05 +1300 Subject: [PATCH 413/482] l*.py: normalize docs (#9390) --- plugins/modules/launchd.py | 90 +++--- plugins/modules/layman.py | 26 +- plugins/modules/lbu.py | 30 +- plugins/modules/ldap_attrs.py | 122 ++++---- plugins/modules/ldap_entry.py | 73 ++--- plugins/modules/ldap_passwd.py | 28 +- plugins/modules/ldap_search.py | 30 +- plugins/modules/librato_annotation.py | 117 ++++---- plugins/modules/linode.py | 216 +++++++------- plugins/modules/linode_v4.py | 66 ++--- plugins/modules/listen_ports_facts.py | 34 +-- plugins/modules/lldp.py | 28 +- plugins/modules/locale_gen.py | 51 ++-- plugins/modules/logentries.py | 72 ++--- plugins/modules/logentries_msg.py | 21 +- plugins/modules/logstash_plugin.py | 78 +++-- plugins/modules/lvg.py | 59 ++-- plugins/modules/lvg_rename.py | 19 +- plugins/modules/lvol.py | 61 ++-- plugins/modules/lxc_container.py | 411 +++++++++++++------------- plugins/modules/lxca_cmms.py | 59 ++-- plugins/modules/lxca_nodes.py | 63 ++-- plugins/modules/lxd_container.py | 370 +++++++++++------------ plugins/modules/lxd_profile.py | 244 ++++++++------- plugins/modules/lxd_project.py | 176 ++++++----- 25 files changed, 1194 insertions(+), 1350 deletions(-) diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py index 9717825c71..ea2163964b 100644 --- a/plugins/modules/launchd.py +++ b/plugins/modules/launchd.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function 
__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: launchd author: - Martin Migasiewicz (@martinm82) @@ -20,57 +19,52 @@ description: extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: + name: + description: - Name of the service. - type: str - required: true - plist: - description: + type: str + required: true + plist: + description: - Name of the V(.plist) file for the service. - Defaults to V({name}.plist). - type: str - version_added: 10.1.0 - state: - description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. - - Launchd does not support V(restarted) nor V(reloaded) natively. - These will trigger a stop/start (restarted) or an unload/load - (reloaded). - - V(restarted) unloads and loads the service before start to ensure - that the latest job definition (plist) is used. - - V(reloaded) unloads and loads the service to ensure that the latest - job definition (plist) is used. Whether a service is started or - stopped depends on the content of the definition file. - type: str - choices: [ reloaded, restarted, started, stopped, unloaded ] - enabled: - description: + type: str + version_added: 10.1.0 + state: + description: + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. + - Launchd does not support V(restarted) nor V(reloaded) natively. These will trigger a stop/start (restarted) or an unload/load (reloaded). + - V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used. + - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service is started or stopped + depends on the content of the definition file. 
+ type: str + choices: [reloaded, restarted, started, stopped, unloaded] + enabled: + description: - Whether the service should start on boot. - - B(At least one of state and enabled are required.) - type: bool - force_stop: - description: + - B(At least one of state and enabled are required). + type: bool + force_stop: + description: - Whether the service should not be restarted automatically by launchd. - - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. - In case this is set to true, stopping a service will cause that launchd starts the service again. - - Set this option to V(true) to let this module change the 'KeepAlive' attribute to V(false). - type: bool - default: false + - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. In case this is set to true, stopping a service + will cause that launchd starts the service again. + - Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false). + type: bool + default: false notes: -- A user must privileged to manage services using this module. + - A user must privileged to manage services using this module. requirements: -- A system managed by launchd -- The plistlib python library -''' + - A system managed by launchd + - The plistlib Python library +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Make sure spotify webhelper is started community.general.launchd: name: com.spotify.webhelper @@ -112,11 +106,11 @@ EXAMPLES = r''' name: com.openssh.sshd plist: ssh.plist state: restarted -''' +""" -RETURN = r''' +RETURN = r""" status: - description: Metadata about service status + description: Metadata about service status. 
returned: always type: dict sample: @@ -126,7 +120,7 @@ status: "previous_pid": "82636", "previous_state": "running" } -''' +""" import os import plistlib diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py index 13d514274b..21b4eba9a4 100644 --- a/plugins/modules/layman.py +++ b/plugins/modules/layman.py @@ -10,14 +10,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: layman author: "Jakub Jirutka (@jirutka)" short_description: Manage Gentoo overlays description: - - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. - Please note that Layman must be installed on a managed node prior using this module. + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman must be installed + on a managed node prior using this module. requirements: - layman python module extends_documentation_fragment: @@ -30,15 +29,13 @@ attributes: options: name: description: - - The overlay id to install, synchronize, or uninstall. - Use 'ALL' to sync all of the installed overlays (can be used only when O(state=updated)). + - The overlay id to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used only when O(state=updated)). required: true type: str list_url: description: - - An URL of the alternative overlays list that defines the overlay to install. - This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where - C(overlay_defs) is read from the Layman's configuration. + - An URL of the alternative overlays list that defines the overlay to install. This list will be fetched and saved under C(${overlay_defs}/${name}.xml), + where C(overlay_defs) is read from the Layman's configuration. 
aliases: [url] type: str state: @@ -49,14 +46,13 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be - set to V(false) when no other option exists. Prior to 1.9.3 the code - defaulted to V(false). + - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists. Prior to 1.9.3 the + code defaulted to V(false). type: bool default: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install the overlay mozilla which is on the central overlays list community.general.layman: name: mozilla @@ -81,7 +77,7 @@ EXAMPLES = ''' community.general.layman: name: cvut state: absent -''' +""" import shutil import traceback diff --git a/plugins/modules/lbu.py b/plugins/modules/lbu.py index c961b6060d..e91fd5e01a 100644 --- a/plugins/modules/lbu.py +++ b/plugins/modules/lbu.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lbu short_description: Local Backup Utility for Alpine Linux @@ -17,8 +16,7 @@ short_description: Local Backup Utility for Alpine Linux version_added: '0.2.0' description: - - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode - + - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode. extends_documentation_fragment: - community.general.attributes @@ -31,24 +29,24 @@ attributes: options: commit: description: - - Control whether to commit changed files. + - Control whether to commit changed files. type: bool exclude: description: - - List of paths to exclude. + - List of paths to exclude. type: list elements: str include: description: - - List of paths to include. + - List of paths to include. 
type: list elements: str author: - Kaarle Ritvanen (@kunkku) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Commit changed files (if any) - name: Commit community.general.lbu: @@ -59,22 +57,22 @@ EXAMPLES = ''' community.general.lbu: commit: true exclude: - - /etc/opt + - /etc/opt # Include paths without committing - name: Include file and directory community.general.lbu: include: - - /root/.ssh/authorized_keys - - /var/lib/misc -''' + - /root/.ssh/authorized_keys + - /var/lib/misc +""" -RETURN = ''' +RETURN = r""" msg: - description: Error message + description: Error message. type: str returned: on failure -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py index 7986833a6e..8f1e0a0ea9 100644 --- a/plugins/modules/ldap_attrs.py +++ b/plugins/modules/ldap_attrs.py @@ -12,27 +12,19 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ldap_attrs short_description: Add or remove multiple LDAP attribute values description: - Add or remove multiple LDAP attribute values. notes: - - This only deals with attributes on existing entries. To add or remove - whole entries, see M(community.general.ldap_entry). - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in O(bind_dn) - and O(bind_pw). - - For O(state=present) and O(state=absent), all value comparisons are - performed on the server for maximum accuracy. For O(state=exact), values - have to be compared in Python, which obviously ignores LDAP matching - rules. 
This should work out in most cases, but it is theoretically - possible to see spurious changes when target and actual values are - semantically identical but lexically distinct. + - This only deals with attributes on existing entries. To add or remove whole entries, see M(community.general.ldap_entry). + - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to modify the server configuration. If you need + to use a simple bind to access your server, pass the credentials in O(bind_dn) and O(bind_pw). + - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For O(state=exact), values + have to be compared in Python, which obviously ignores LDAP matching rules. This should work out in most cases, but it is theoretically possible + to see spurious changes when target and actual values are semantically identical but lexically distinct. version_added: '0.2.0' author: - Jiri Tyr (@jtyr) @@ -53,46 +45,38 @@ options: choices: [present, absent, exact] default: present description: - - The state of the attribute values. If V(present), all given attribute - values will be added if they're missing. If V(absent), all given - attribute values will be removed if present. If V(exact), the set of - attribute values will be forced to exactly those provided and no others. - If O(state=exact) and the attribute value is empty, all values for - this attribute will be removed. + - The state of the attribute values. If V(present), all given attribute values will be added if they are missing. If V(absent), all given + attribute values will be removed if present. If V(exact), the set of attribute values will be forced to exactly those provided and no + others. 
If O(state=exact) and the attribute value is empty, all values for this attribute will be removed. attributes: required: true type: dict description: - The attribute(s) and value(s) to add or remove. - - Each attribute value can be a string for single-valued attributes or - a list of strings for multi-valued attributes. - - If you specify values for this option in YAML, please note that you can improve - readability for long string values by using YAML block modifiers as seen in the - examples for this module. - - Note that when using values that YAML/ansible-core interprets as other types, - like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if - these are meant to be strings. Otherwise the wrong values may be sent to LDAP. + - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve readability for long string values by using YAML block + modifiers as seen in the examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) (float), make + sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP. ordered: required: false type: bool default: false description: - - If V(true), prepend list values with X-ORDERED index numbers in all - attributes specified in the current task. This is useful mostly with + - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This is useful mostly with C(olcAccess) attribute to easily manage LDAP Access Control Lists. 
extends_documentation_fragment: - community.general.ldap.documentation - community.general.attributes - -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure directory number 1 for example.com community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcSuffix: dc=example,dc=com + olcSuffix: dc=example,dc=com state: exact # The complex argument format is required here to pass a list of ACL strings. @@ -100,17 +84,17 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcAccess: - - >- - {0}to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - {1}to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read state: exact # An alternative approach with automatic X-ORDERED numbering @@ -118,17 +102,17 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcAccess: - - >- - to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read + olcAccess: + - >- + to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read ordered: true state: exact @@ -136,23 +120,23 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcDbIndex: - - objectClass eq - - uid eq + olcDbIndex: + - objectClass eq + - uid eq - name: Set up a root user, which we 
can use later to bootstrap the directory community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcRootDN: cn=root,dc=example,dc=com - olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + olcRootDN: cn=root,dc=example,dc=com + olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" state: exact - name: Remove an attribute with a specific value community.general.ldap_attrs: dn: uid=jdoe,ou=people,dc=example,dc=com attributes: - description: "An example user account" + description: "An example user account" state: absent server_uri: ldap://localhost/ bind_dn: cn=admin,dc=example,dc=com @@ -162,22 +146,22 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: uid=jdoe,ou=people,dc=example,dc=com attributes: - description: [] + description: [] state: exact server_uri: ldap://localhost/ bind_dn: cn=admin,dc=example,dc=com bind_pw: password -''' +""" -RETURN = r''' +RETURN = r""" modlist: - description: list of modified parameters + description: List of modified parameters. returned: success type: list sample: - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] -''' +""" import traceback diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py index 5deaf7c4c4..d3ce90433a 100644 --- a/plugins/modules/ldap_entry.py +++ b/plugins/modules/ldap_entry.py @@ -11,21 +11,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ldap_entry short_description: Add or remove LDAP entries description: - - Add or remove LDAP entries. This module only asserts the existence or - non-existence of an LDAP entry, not its attributes. To assert the - attribute values of an entry, see M(community.general.ldap_attrs). + - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes. To assert the attribute + values of an entry, see M(community.general.ldap_attrs). 
notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in O(bind_dn) - and O(bind_pw). + - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to modify the server configuration. If you need + to use a simple bind to access your server, pass the credentials in O(bind_dn) and O(bind_pw). author: - Jiri Tyr (@jtyr) requirements: @@ -38,24 +33,18 @@ attributes: options: attributes: description: - - If O(state=present), attributes necessary to create an entry. Existing - entries are never modified. To assert specific attribute values on an - existing entry, use M(community.general.ldap_attrs) module instead. - - Each attribute value can be a string for single-valued attributes or - a list of strings for multi-valued attributes. - - If you specify values for this option in YAML, please note that you can improve - readability for long string values by using YAML block modifiers as seen in the - examples for this module. - - Note that when using values that YAML/ansible-core interprets as other types, - like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if - these are meant to be strings. Otherwise the wrong values may be sent to LDAP. + - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific attribute values + on an existing entry, use M(community.general.ldap_attrs) module instead. 
+ - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve readability for long string values by using YAML block + modifiers as seen in the examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) (float), make + sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP. type: dict default: {} objectClass: description: - - If O(state=present), value or list of values to use when creating - the entry. It can either be a string or an actual list of - strings. + - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual list of strings. type: list elements: str state: @@ -66,19 +55,17 @@ options: type: str recursive: description: - - If O(state=delete), a flag indicating whether a single entry or the - whole branch must be deleted. + - If O(state=delete), a flag indicating whether a single entry or the whole branch must be deleted. 
type: bool default: false version_added: 4.6.0 extends_documentation_fragment: - community.general.ldap.documentation - community.general.attributes - -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Make sure we have a parent entry for users community.general.ldap_entry: dn: ou=users,dc=example,dc=com @@ -103,19 +90,19 @@ EXAMPLES = """ attributes: description: An LDAP Administrator roleOccupant: - - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com - - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com + - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com + - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com olcAccess: - - >- - {0}to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - {1}to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read - name: Get rid of an old entry community.general.ldap_entry: @@ -143,7 +130,7 @@ EXAMPLES = """ """ -RETURN = """ +RETURN = r""" # Default return values """ diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py index 5044586b0f..ab2c9a890b 100644 --- a/plugins/modules/ldap_passwd.py +++ b/plugins/modules/ldap_passwd.py @@ -9,21 +9,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ldap_passwd short_description: Set passwords in LDAP description: - - Set a password for an LDAP entry. This module only asserts that - a given password is valid for a given entry. To assert the - existence of an entry, see M(community.general.ldap_entry). + - Set a password for an LDAP entry. 
This module only asserts that a given password is valid for a given entry. To assert the existence of an + entry, see M(community.general.ldap_entry). notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in O(bind_dn) - and O(bind_pw). + - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to modify the server configuration. If you + need to use a simple bind to access your server, pass the credentials in O(bind_dn) and O(bind_pw). author: - Keller Fuchs (@KellerFuchs) requirements: @@ -41,10 +36,9 @@ options: extends_documentation_fragment: - community.general.ldap.documentation - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Set a password for the admin user community.general.ldap_passwd: dn: cn=admin,dc=example,dc=com @@ -56,13 +50,13 @@ EXAMPLES = """ passwd: "{{ item.value }}" with_dict: alice: alice123123 - bob: "|30b!" + bob: "|30b!" admin: "{{ vault_secret }}" """ -RETURN = """ +RETURN = r""" modlist: - description: list of modified parameters + description: List of modified parameters. 
returned: success type: list sample: diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py index 7958f86e0b..6c47c2f7e0 100644 --- a/plugins/modules/ldap_search.py +++ b/plugins/modules/ldap_search.py @@ -10,19 +10,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" ---- module: ldap_search version_added: '0.2.0' short_description: Search for entries in a LDAP server description: - Return the results of an LDAP search. notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in O(bind_dn) - and O(bind_pw). + - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to modify the server configuration. If you + need to use a simple bind to access your server, pass the credentials in O(bind_dn) and O(bind_pw). author: - Sebastian Pfahl (@eryx12o45) requirements: @@ -55,30 +51,26 @@ options: type: list elements: str description: - - A list of attributes for limiting the result. Use an - actual list or a comma-separated string. + - A list of attributes for limiting the result. Use an actual list or a comma-separated string. schema: default: false type: bool description: - - Set to V(true) to return the full attribute schema of entries, not - their attribute values. Overrides O(attrs) when provided. + - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when provided. 
page_size: default: 0 type: int description: - - The page size when performing a simple paged result search (RFC 2696). - This setting can be tuned to reduce issues with timeouts and server limits. + - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues with timeouts and server + limits. - Setting the page size to V(0) (default) disables paged searching. version_added: 7.1.0 base64_attributes: description: - - If provided, all attribute values returned that are listed in this option - will be Base64 encoded. - - If the special value V(*) appears in this list, all attributes will be - Base64 encoded. - - All other attribute values will be converted to UTF-8 strings. If they - contain binary data, please note that invalid UTF-8 bytes will be omitted. + - If provided, all attribute values returned that are listed in this option will be Base64 encoded. + - If the special value V(*) appears in this list, all attributes will be Base64 encoded. + - All other attribute values will be converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8 bytes will + be omitted. type: list elements: str version_added: 7.0.0 diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py index ebfb751546..b4f7dd54cc 100644 --- a/plugins/modules/librato_annotation.py +++ b/plugins/modules/librato_annotation.py @@ -9,74 +9,73 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: librato_annotation short_description: Create an annotation in librato description: - - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically + - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically. 
author: "Seth Edwards (@Sedward)" requirements: [] extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - user: - type: str - description: - - Librato account username - required: true - api_key: - type: str - description: - - Librato account api key - required: true - name: - type: str - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - type: str - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. v45 Deployment - required: true - source: - type: str - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false + user: + type: str description: - type: str - description: - - The description contains extra metadata about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! - required: false - start_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation started - required: false - end_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation ended - - For events that have a duration, this is a useful way to annotate the duration of the event - required: false - links: - type: list - elements: dict - description: - - See examples -''' + - Librato account username. + required: true + api_key: + type: str + description: + - Librato account api key. + required: true + name: + type: str + description: + - The annotation stream name. 
+ - If the annotation stream does not exist, it will be created automatically. + required: false + title: + type: str + description: + - The title of an annotation is a string and may contain spaces. + - The title should be a short, high-level summary of the annotation for example V(v45 Deployment). + required: true + source: + type: str + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population. + required: false + description: + type: str + description: + - The description contains extra metadata about a particular annotation. + - The description should contain specifics on the individual annotation for example V(Deployed 9b562b2 shipped new feature foo!). + required: false + start_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation started. + required: false + end_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation ended. + - For events that have a duration, this is a useful way to annotate the duration of the event. + required: false + links: + type: list + elements: dict + description: + - See examples. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a simple annotation event with a source community.general.librato_annotation: user: user@example.com @@ -105,7 +104,7 @@ EXAMPLES = ''' description: This is a detailed description of maintenance start_time: 1395940006 end_time: 1395954406 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py index 9b0dabdff2..9d907c898b 100644 --- a/plugins/modules/linode.py +++ b/plugins/modules/linode.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: linode short_description: Manage instances on the Linode Public Cloud description: @@ -24,34 +23,32 @@ attributes: options: state: description: - - Indicate desired state of the resource - choices: [ absent, active, deleted, present, restarted, started, stopped ] + - Indicate desired state of the resource. + choices: [absent, active, deleted, present, restarted, started, stopped] default: present type: str api_key: description: - - Linode API key. - - E(LINODE_API_KEY) environment variable can be used instead. + - Linode API key. + - E(LINODE_API_KEY) environment variable can be used instead. type: str required: true name: description: - - Name to give the instance (alphanumeric, dashes, underscore). - - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-). + - Name to give the instance (alphanumeric, dashes, underscore). + - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-). required: true type: str displaygroup: description: - - Add the instance to a Display Group in Linode Manager. + - Add the instance to a Display Group in Linode Manager. type: str default: '' linode_id: description: - - Unique ID of a linode server. 
This value is read-only in the sense that - if you specify it on creation of a Linode it will not be used. The - Linode API generates these IDs and we can those generated value here to - reference a Linode more specifically. This is useful for idempotence. - aliases: [ lid ] + - Unique ID of a linode server. This value is read-only in the sense that if you specify it on creation of a Linode it will not be used. + The Linode API generates these IDs and we can those generated value here to reference a Linode more specifically. This is useful for idempotence. + aliases: [lid] type: int additional_disks: description: @@ -61,119 +58,118 @@ options: elements: dict alert_bwin_enabled: description: - - Set status of bandwidth in alerts. + - Set status of bandwidth in alerts. type: bool alert_bwin_threshold: description: - - Set threshold in MB of bandwidth in alerts. + - Set threshold in MB of bandwidth in alerts. type: int alert_bwout_enabled: description: - - Set status of bandwidth out alerts. + - Set status of bandwidth out alerts. type: bool alert_bwout_threshold: description: - - Set threshold in MB of bandwidth out alerts. + - Set threshold in MB of bandwidth out alerts. type: int alert_bwquota_enabled: description: - - Set status of bandwidth quota alerts as percentage of network transfer quota. + - Set status of bandwidth quota alerts as percentage of network transfer quota. type: bool alert_bwquota_threshold: description: - - Set threshold in MB of bandwidth quota alerts. + - Set threshold in MB of bandwidth quota alerts. type: int alert_cpu_enabled: description: - - Set status of receiving CPU usage alerts. + - Set status of receiving CPU usage alerts. type: bool alert_cpu_threshold: description: - - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. + - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. 
type: int alert_diskio_enabled: description: - - Set status of receiving disk IO alerts. + - Set status of receiving disk IO alerts. type: bool alert_diskio_threshold: description: - - Set threshold for average IO ops/sec over 2 hour period. + - Set threshold for average IO ops/sec over 2 hour period. type: int backupweeklyday: description: - - Day of the week to take backups. + - Day of the week to take backups. type: int backupwindow: description: - - The time window in which backups will be taken. + - The time window in which backups will be taken. type: int plan: description: - - plan to use for the instance (Linode plan) + - Plan to use for the instance (Linode plan). type: int payment_term: description: - - payment term to use for the instance (payment term in months) + - Payment term to use for the instance (payment term in months). default: 1 - choices: [ 1, 12, 24 ] + choices: [1, 12, 24] type: int password: description: - - root password to apply to a new server (auto generated if missing) + - Root password to apply to a new server (auto generated if missing). type: str private_ip: description: - - Add private IPv4 address when Linode is created. - - Default is V(false). + - Add private IPv4 address when Linode is created. + - Default is V(false). type: bool ssh_pub_key: description: - - SSH public key applied to root user + - SSH public key applied to root user. type: str swap: description: - - swap size in MB + - Swap size in MB. default: 512 type: int distribution: description: - - distribution to use for the instance (Linode Distribution) + - Distribution to use for the instance (Linode Distribution). type: int datacenter: description: - - datacenter to create an instance in (Linode Datacenter) + - Datacenter to create an instance in (Linode Datacenter). type: int kernel_id: description: - - kernel to use for the instance (Linode Kernel) + - Kernel to use for the instance (Linode Kernel). 
type: int wait: description: - - wait for the instance to be in state V(running) before returning + - Wait for the instance to be in state V(running) before returning. type: bool default: true wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. default: 300 type: int watchdog: description: - - Set status of Lassie watchdog. + - Set status of Lassie watchdog. type: bool default: true requirements: - - linode-python + - linode-python author: -- Vincent Viallet (@zbal) + - Vincent Viallet (@zbal) notes: - Please note, linode-python does not have python 3 support. - This module uses the now deprecated v3 of the Linode API. - Please review U(https://www.linode.com/api/linode) for determining the required parameters. -''' - -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Create a new Linode community.general.linode: name: linode-test1 @@ -185,97 +181,97 @@ EXAMPLES = ''' - name: Create a server with a private IP Address community.general.linode: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - private_ip: true - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: true - wait_timeout: 600 - state: present + module: linode + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present delegate_to: localhost register: linode_creation - name: Fully configure new server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 4 - datacenter: 2 - distribution: 99 - kernel_id: 138 - password: 'superSecureRootPassword' - private_ip: true - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: true - wait_timeout: 600 - state: present - alert_bwquota_enabled: true - alert_bwquota_threshold: 
80 - alert_bwin_enabled: true - alert_bwin_threshold: 10 - alert_cpu_enabled: true - alert_cpu_threshold: 210 - alert_bwout_enabled: true - alert_bwout_threshold: 10 - alert_diskio_enabled: true - alert_diskio_threshold: 10000 - backupweeklyday: 1 - backupwindow: 2 - displaygroup: 'test' - additional_disks: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 4 + datacenter: 2 + distribution: 99 + kernel_id: 138 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + alert_bwquota_enabled: true + alert_bwquota_threshold: 80 + alert_bwin_enabled: true + alert_bwin_threshold: 10 + alert_cpu_enabled: true + alert_cpu_threshold: 210 + alert_bwout_enabled: true + alert_bwout_threshold: 10 + alert_diskio_enabled: true + alert_diskio_threshold: 10000 + backupweeklyday: 1 + backupwindow: 2 + displaygroup: 'test' + additional_disks: - {Label: 'disk1', Size: 2500, Type: 'raw'} - {Label: 'newdisk', Size: 2000} - watchdog: true + watchdog: true delegate_to: localhost register: linode_creation - name: Ensure a running server (create if missing) community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: true - wait_timeout: 600 - state: present + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present delegate_to: localhost register: linode_creation - name: Delete a server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: absent + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: absent 
delegate_to: localhost - name: Stop a server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: stopped + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: stopped delegate_to: localhost - name: Reboot a server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: restarted + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: restarted delegate_to: localhost -''' +""" import time import traceback diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py index da885f3a5f..cac890f79b 100644 --- a/plugins/modules/linode_v4.py +++ b/plugins/modules/linode_v4.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: linode_v4 short_description: Manage instances on the Linode cloud description: Manage instances on the Linode cloud. @@ -18,9 +17,8 @@ requirements: author: - Luke Murphy (@decentral1se) notes: - - No Linode resizing is currently implemented. This module will, in time, - replace the current Linode module which uses deprecated API bindings on the - Linode side. + - No Linode resizing is currently implemented. This module will, in time, replace the current Linode module which uses deprecated API bindings + on the Linode side. extends_documentation_fragment: - community.general.attributes attributes: @@ -31,52 +29,44 @@ attributes: options: region: description: - - The region of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/regions/). + - The region of the instance. This is a required parameter only when creating Linode instances. 
See U(https://www.linode.com/docs/api/regions/). type: str image: description: - - The image of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/images/). + - The image of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/images/). type: str type: description: - - The type of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/linode-types/). + - The type of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/linode-types/). type: str label: description: - - The instance label. This label is used as the main determiner for - idempotence for the module and is therefore mandatory. + - The instance label. This label is used as the main determiner for idempotence for the module and is therefore mandatory. type: str required: true group: description: - - The group that the instance should be marked under. Please note, that - group labelling is deprecated but still supported. The encouraged - method for marking instances is to use tags. + - The group that the instance should be marked under. Please note, that group labelling is deprecated but still supported. The encouraged + method for marking instances is to use tags. type: str private_ip: description: - - If V(true), the created Linode will have private networking enabled and - assigned a private IPv4 address. + - If V(true), the created Linode will have private networking enabled and assigned a private IPv4 address. type: bool default: false version_added: 3.0.0 tags: description: - - The tags that the instance should be marked under. See - U(https://www.linode.com/docs/api/tags/). + - The tags that the instance should be marked under. + - See U(https://www.linode.com/docs/api/tags/). 
type: list elements: str root_pass: description: - - The password for the root user. If not specified, one will be - generated. This generated password will be available in the task - success JSON. + - The password for the root user. If not specified, one will be generated. This generated password will be available in the task success + JSON. type: str authorized_keys: description: @@ -88,33 +78,31 @@ options: - The desired instance state. type: str choices: - - present - - absent + - present + - absent required: true access_token: description: - - The Linode API v4 access token. It may also be specified by exposing - the E(LINODE_ACCESS_TOKEN) environment variable. See - U(https://www.linode.com/docs/api#access-and-authentication). + - The Linode API v4 access token. It may also be specified by exposing the E(LINODE_ACCESS_TOKEN) environment variable. + - See U(https://www.linode.com/docs/api#access-and-authentication). required: true type: str stackscript_id: description: - The numeric ID of the StackScript to use when creating the instance. - See U(https://www.linode.com/docs/api/stackscripts/). + - See U(https://www.linode.com/docs/api/stackscripts/). type: int version_added: 1.3.0 stackscript_data: description: - - An object containing arguments to any User Defined Fields present in - the StackScript used when creating the instance. - Only valid when a stackscript_id is provided. - See U(https://www.linode.com/docs/api/stackscripts/). + - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance. Only valid when + a O(stackscript_id) is provided. + - See U(https://www.linode.com/docs/api/stackscripts/). type: dict version_added: 1.3.0 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Create a new Linode. 
community.general.linode_v4: label: new-linode @@ -135,7 +123,7 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" instance: description: The instance description in JSON serialized form. returned: Always. diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py index 08030a8b37..9f9eb66481 100644 --- a/plugins/modules/listen_ports_facts.py +++ b/plugins/modules/listen_ports_facts.py @@ -8,21 +8,19 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: listen_ports_facts author: - - Nathan Davison (@ndavison) + - Nathan Davison (@ndavison) description: - - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. - - This module currently supports Linux only. + - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. + - This module currently supports Linux only. requirements: - netstat or ss short_description: Gather facts on processes listening on TCP and UDP ports notes: - - | - C(ss) returns all processes for each listen address and port. - This plugin will return each of them, so multiple entries for the same listen address and port are likely in results. + - C(ss) returns all processes for each listen address and port. + - This plugin will return each of them, so multiple entries for the same listen address and port are likely in results. extends_documentation_fragment: - community.general.attributes - community.general.attributes.facts @@ -31,7 +29,7 @@ options: command: description: - Override which command to use for fetching listen ports. - - 'By default module will use first found supported command on the system (in alphanumerical order).' + - By default module will use first found supported command on the system (in alphanumerical order). 
type: str choices: - netstat @@ -39,15 +37,15 @@ options: version_added: 4.1.0 include_non_listening: description: - - Show both listening and non-listening sockets (for TCP this means established connections). - - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state), - RV(ansible_facts.tcp_listen[].foreign_address), and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts. + - Show both listening and non-listening sockets (for TCP this means established connections). + - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state), RV(ansible_facts.tcp_listen[].foreign_address), + and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts. type: bool default: false version_added: 5.4.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts on listening ports community.general.listen_ports_facts: @@ -77,11 +75,11 @@ EXAMPLES = r''' community.general.listen_ports_facts: command: 'netstat' include_non_listening: true -''' +""" -RETURN = r''' +RETURN = r""" ansible_facts: - description: Dictionary containing details of TCP and UDP ports with listening servers + description: Dictionary containing details of TCP and UDP ports with listening servers. returned: always type: complex contains: @@ -189,7 +187,7 @@ ansible_facts: returned: always type: str sample: "root" -''' +""" import re import platform diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py index fb608ff138..baefb09d91 100644 --- a/plugins/modules/lldp.py +++ b/plugins/modules/lldp.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lldp -requirements: [ lldpctl ] -short_description: Get details reported by lldp +requirements: [lldpctl] +short_description: Get details reported by LLDP description: - - Reads data out of lldpctl + - Reads data out of C(lldpctl). 
extends_documentation_fragment: - community.general.attributes attributes: @@ -26,25 +25,24 @@ attributes: options: {} author: "Andy Hill (@andyhky)" notes: - - Requires lldpd running and lldp enabled on switches -''' + - Requires C(lldpd) running and LLDP enabled on switches. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Retrieve switch/port information - - name: Gather information from lldp - community.general.lldp: +- name: Gather information from LLDP + community.general.lldp: - - name: Print each switch/port - ansible.builtin.debug: +- name: Print each switch/port + ansible.builtin.debug: msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" - with_items: "{{ lldp.keys() }}" + with_items: "{{ lldp.keys() }}" # TASK: [Print each switch/port] *********************************************************** # ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} # ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} # ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index 8886cdc9cd..6c0412f464 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -8,40 +8,39 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: locale_gen short_description: Creates or removes locales description: - - Manages locales by editing /etc/locale.gen and invoking locale-gen. + - Manages locales by editing /etc/locale.gen and invoking C(locale-gen). 
author: - - Augustus Kling (@AugustusKling) + - Augustus Kling (@AugustusKling) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: list - elements: str - description: - - Name and encoding of the locales, such as V(en_GB.UTF-8). - - Before community.general 9.3.0, this was a string. Using a string still works. - required: true - state: - type: str - description: - - Whether the locale shall be present. - choices: [ absent, present ] - default: present + name: + type: list + elements: str + description: + - Name and encoding of the locales, such as V(en_GB.UTF-8). + - Before community.general 9.3.0, this was a string. Using a string still works. + required: true + state: + type: str + description: + - Whether the locale shall be present. + choices: [absent, present] + default: present notes: - - This module does not support RHEL-based systems. -''' + - This module does not support RHEL-based systems. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure a locale exists community.general.locale_gen: name: de_CH.UTF-8 @@ -53,7 +52,7 @@ EXAMPLES = ''' - en_GB.UTF-8 - nl_NL.UTF-8 state: present -''' +""" import os import re diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py index f177cf4546..420f054fac 100644 --- a/plugins/modules/logentries.py +++ b/plugins/modules/logentries.py @@ -9,49 +9,49 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logentries author: "Ivan Vanderbyl (@ivanvanderbyl)" -short_description: Module for tracking logs via logentries.com +short_description: Module for tracking logs using U(logentries.com) description: - - Sends logs to LogEntries in realtime + - Sends logs to LogEntries in realtime. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - path: - type: str - description: - - path to a log file - required: true - state: - type: str - description: - - following state of the log - choices: [ 'present', 'absent', 'followed', 'unfollowed' ] - required: false - default: present - name: - type: str - description: - - name of the log - required: false - logtype: - type: str - description: - - type of the log - required: false - aliases: [type] + path: + type: str + description: + - Path to a log file. + required: true + state: + type: str + description: + - Following state of the log. + choices: ['present', 'absent', 'followed', 'unfollowed'] + required: false + default: present + name: + type: str + description: + - Name of the log. + required: false + logtype: + type: str + description: + - Type of the log. + required: false + aliases: [type] notes: - - Requires the LogEntries agent which can be installed following the instructions at logentries.com -''' -EXAMPLES = ''' + - Requires the LogEntries agent which can be installed following the instructions at U(logentries.com). 
+""" + +EXAMPLES = r""" - name: Track nginx logs community.general.logentries: path: /var/log/nginx/access.log @@ -62,7 +62,7 @@ EXAMPLES = ''' community.general.logentries: path: /var/log/nginx/error.log state: absent -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py index 03851ad1f4..dd3b88d624 100644 --- a/plugins/modules/logentries_msg.py +++ b/plugins/modules/logentries_msg.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logentries_msg short_description: Send a message to logentries description: - - Send a message to logentries + - Send a message to logentries. extends_documentation_fragment: - community.general.attributes attributes: @@ -36,24 +35,24 @@ options: api: type: str description: - - API endpoint + - API endpoint. default: data.logentries.com port: type: int description: - - API endpoint port + - API endpoint port. default: 80 author: "Jimmy Tang (@jcftang) " -''' +""" -RETURN = '''# ''' +RETURN = """# """ -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to logentries community.general.logentries_msg: - token=00000000-0000-0000-0000-000000000000 - msg="{{ ansible_hostname }}" -''' + token: 00000000-0000-0000-0000-000000000000 + msg: "{{ ansible_hostname }}" +""" import socket diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py index 7ee118ff28..ba7bdc2cc5 100644 --- a/plugins/modules/logstash_plugin.py +++ b/plugins/modules/logstash_plugin.py @@ -8,53 +8,51 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logstash_plugin short_description: Manage Logstash plugins description: - - Manages Logstash plugins. + - Manages Logstash plugins. 
author: Loic Blot (@nerzhul) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Install plugin with that name. - required: true - state: - type: str - description: - - Apply plugin state. - choices: ["present", "absent"] - default: present - plugin_bin: - type: path - description: - - Specify logstash-plugin to use for plugin management. - default: /usr/share/logstash/bin/logstash-plugin - proxy_host: - type: str - description: - - Proxy host to use during plugin installation. - proxy_port: - type: str - description: - - Proxy port to use during plugin installation. - version: - type: str - description: - - Specify plugin Version of the plugin to install. - If plugin exists with previous version, it will NOT be updated. -''' + name: + type: str + description: + - Install plugin with that name. + required: true + state: + type: str + description: + - Apply plugin state. + choices: ["present", "absent"] + default: present + plugin_bin: + type: path + description: + - Specify logstash-plugin to use for plugin management. + default: /usr/share/logstash/bin/logstash-plugin + proxy_host: + type: str + description: + - Proxy host to use during plugin installation. + proxy_port: + type: str + description: + - Proxy port to use during plugin installation. + version: + type: str + description: + - Specify plugin Version of the plugin to install. If plugin exists with previous version, it will NOT be updated. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Logstash beats input plugin community.general.logstash_plugin: state: present @@ -77,7 +75,7 @@ EXAMPLES = ''' name: logstash-input-beats environment: LS_JAVA_OPTS: "-Xms256m -Xmx256m" -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py index 7ff7e3a2e7..f6f8abc020 100644 --- a/plugins/modules/lvg.py +++ b/plugins/modules/lvg.py @@ -9,10 +9,9 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Alexander Bulimov (@abulimov) + - Alexander Bulimov (@abulimov) module: lvg short_description: Configure LVM volume groups description: @@ -27,78 +26,76 @@ attributes: options: vg: description: - - The name of the volume group. + - The name of the volume group. type: str required: true pvs: description: - - List of comma-separated devices to use as physical devices in this volume group. - - Required when creating or resizing volume group. - - The module will take care of running pvcreate if needed. + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module will take care of running pvcreate if needed. type: list elements: str pesize: description: - - "The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector - (where the sector size is the largest sector size of the PVs currently used in the VG), - or at least 128KiB." - - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the largest sector size + of the PVs currently used in the VG), or at least 128KiB. + - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. 
type: str default: "4" pv_options: description: - - Additional options to pass to C(pvcreate) when creating the volume group. + - Additional options to pass to C(pvcreate) when creating the volume group. type: str default: '' pvresize: description: - - If V(true), resize the physical volume to the maximum available size. + - If V(true), resize the physical volume to the maximum available size. type: bool default: false version_added: '0.2.0' vg_options: description: - - Additional options to pass to C(vgcreate) when creating the volume group. + - Additional options to pass to C(vgcreate) when creating the volume group. type: str default: '' state: description: - - Control if the volume group exists and it's state. - - The states V(active) and V(inactive) implies V(present) state. Added in 7.1.0 - - "If V(active) or V(inactive), the module manages the VG's logical volumes current state. - The module also handles the VG's autoactivation state if supported - unless when creating a volume group and the autoactivation option specified in O(vg_options)." + - Control if the volume group exists and it's state. + - The states V(active) and V(inactive) implies V(present) state. Added in 7.1.0. + - If V(active) or V(inactive), the module manages the VG's logical volumes current state. The module also handles the VG's autoactivation + state if supported unless when creating a volume group and the autoactivation option specified in O(vg_options). type: str - choices: [ absent, present, active, inactive ] + choices: [absent, present, active, inactive] default: present force: description: - - If V(true), allows to remove volume group with logical volumes. + - If V(true), allows to remove volume group with logical volumes. type: bool default: false reset_vg_uuid: description: - - Whether the volume group's UUID is regenerated. - - This is B(not idempotent). Specifying this parameter always results in a change. + - Whether the volume group's UUID is regenerated. 
+ - This is B(not idempotent). Specifying this parameter always results in a change. type: bool default: false version_added: 7.1.0 reset_pv_uuid: description: - - Whether the volume group's physical volumes' UUIDs are regenerated. - - This is B(not idempotent). Specifying this parameter always results in a change. + - Whether the volume group's physical volumes' UUIDs are regenerated. + - This is B(not idempotent). Specifying this parameter always results in a change. type: bool default: false version_added: 7.1.0 seealso: -- module: community.general.filesystem -- module: community.general.lvol -- module: community.general.parted + - module: community.general.filesystem + - module: community.general.lvol + - module: community.general.parted notes: - This module does not modify PE size for already present volume group. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB community.general.lvg: vg: vg.services @@ -154,7 +151,7 @@ EXAMPLES = r''' pvs: /dev/sdb1,/dev/sdc5 reset_vg_uuid: true reset_pv_uuid: true -''' +""" import itertools import os diff --git a/plugins/modules/lvg_rename.py b/plugins/modules/lvg_rename.py index bd48ffa62f..37f513697e 100644 --- a/plugins/modules/lvg_rename.py +++ b/plugins/modules/lvg_rename.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - Laszlo Szomor (@lszomor) module: lvg_rename @@ -27,23 +26,23 @@ version_added: 7.1.0 options: vg: description: - - The name or UUID of the source VG. - - See V(vgrename(8\)) for valid values. + - The name or UUID of the source VG. + - See V(vgrename(8\)) for valid values. type: str required: true vg_new: description: - - The new name of the VG. - - See V(lvm(8\)) for valid names. + - The new name of the VG. + - See V(lvm(8\)) for valid names. 
type: str required: true seealso: -- module: community.general.lvg + - module: community.general.lvg notes: - This module does not modify VG renaming-related configurations like C(fstab) entries or boot parameters. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Rename a VG by name community.general.lvg_rename: vg: vg_orig_name @@ -53,7 +52,7 @@ EXAMPLES = r''' community.general.lvg_rename: vg_uuid: SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj vg_new: vg_new_name -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py index 3a2f5c7cdd..34b24f7570 100644 --- a/plugins/modules/lvol.py +++ b/plugins/modules/lvol.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - - Jeroen Hoekx (@jhoekx) - - Alexander Bulimov (@abulimov) - - Raoul Baudach (@unkaputtbar112) - - Ziga Kern (@zigaSRC) + - Jeroen Hoekx (@jhoekx) + - Alexander Bulimov (@abulimov) + - Raoul Baudach (@unkaputtbar112) + - Ziga Kern (@zigaSRC) module: lvol short_description: Configure LVM logical volumes description: @@ -31,75 +30,69 @@ options: type: str required: true description: - - The volume group this logical volume is part of. + - The volume group this logical volume is part of. lv: type: str description: - - The name of the logical volume. + - The name of the logical volume. size: type: str description: - - The size of the logical volume, according to lvcreate(8) --size, by - default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or - according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN]; - Float values must begin with a digit. - - When resizing, apart from specifying an absolute size you may, according to - lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with - the prefix V(+) or the amount to reduce the logical volume by with prefix V(-). 
- - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0. - - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent). + - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] + units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN]; Float values must begin with a digit. + - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify the amount to extend + the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix V(-). + - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0. + - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent). state: type: str description: - - Control if the logical volume exists. If V(present) and the - volume does not already exist then the O(size) option is required. - choices: [ absent, present ] + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option is required. + choices: [absent, present] default: present active: description: - - Whether the volume is active and visible to the host. + - Whether the volume is active and visible to the host. type: bool default: true force: description: - - Shrink or remove operations of volumes requires this switch. Ensures that - that filesystems get never corrupted/destroyed by mistake. + - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. type: bool default: false opts: type: str description: - - Free-form options to be passed to the lvcreate command. + - Free-form options to be passed to the lvcreate command. snapshot: type: str description: - - The name of a snapshot volume to be configured. 
When creating a snapshot volume, the O(lv) parameter specifies the origin volume. + - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the origin volume. pvs: type: list elements: str description: - - List of physical volumes (for example V(/dev/sda, /dev/sdb)). + - List of physical volumes (for example V(/dev/sda, /dev/sdb)). thinpool: type: str description: - - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name. + - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name. shrink: description: - - Shrink if current size is higher than size requested. + - Shrink if current size is higher than size requested. type: bool default: true resizefs: description: - - Resize the underlying filesystem together with the logical volume. - - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. - Attempts to resize other filesystem types will fail. + - Resize the underlying filesystem together with the logical volume. + - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types will fail. type: bool default: false notes: - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume). 
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a logical volume of 512m community.general.lvol: vg: firefly @@ -233,7 +226,7 @@ EXAMPLES = ''' lv: test thinpool: testpool size: 128g -''' +""" import re import shlex diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py index 2d768eaafd..9b44c4fc89 100644 --- a/plugins/modules/lxc_container.py +++ b/plugins/modules/lxc_container.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: lxc_container short_description: Manage LXC Containers description: @@ -19,183 +18,171 @@ author: "Kevin Carter (@cloudnull)" extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of a container. - type: str - required: true - backing_store: - choices: - - dir - - lvm - - loop - - btrfs - - overlayfs - - zfs - description: - - Backend storage type for the container. - type: str - default: dir - template: - description: - - Name of the template to use within an LXC create. - type: str - default: ubuntu - template_options: - description: - - Template options when building the container. - type: str - config: - description: - - Path to the LXC configuration file. - type: path - lv_name: - description: - - Name of the logical volume, defaults to the container name. - - If not specified, it defaults to C($CONTAINER_NAME). - type: str - vg_name: - description: - - If backend store is lvm, specify the name of the volume group. - type: str - default: lxc - thinpool: - description: - - Use LVM thin pool called TP. - type: str - fs_type: - description: - - Create fstype TYPE. - type: str - default: ext4 - fs_size: - description: - - File system Size. 
- type: str - default: 5G - directory: - description: - - Place rootfs directory under DIR. - type: path - zfs_root: - description: - - Create zfs under given zfsroot. - type: str - container_command: - description: - - Run a command within a container. - type: str - lxc_path: - description: - - Place container under E(PATH). - type: path - container_log: - description: - - Enable a container log for host actions to the container. - type: bool - default: false - container_log_level: - choices: - - Info - - info - - INFO - - Error - - error - - ERROR - - Debug - - debug - - DEBUG - description: - - Set the log level for a container where O(container_log) was set. - type: str - required: false - default: INFO - clone_name: - description: - - Name of the new cloned server. - - This is only used when state is clone. - type: str - clone_snapshot: - description: - - Create a snapshot a container when cloning. - - This is not supported by all container storage backends. - - Enabling this may fail if the backing store does not support snapshots. - type: bool - default: false - archive: - description: - - Create an archive of a container. - - This will create a tarball of the running container. - type: bool - default: false - archive_path: - description: - - Path the save the archived container. - - If the path does not exist the archive method will attempt to create it. - type: path - archive_compression: - choices: - - gzip - - bzip2 - - none - description: - - Type of compression to use when creating an archive of a running - container. - type: str - default: gzip - state: - choices: - - started - - stopped - - restarted - - absent - - frozen - - clone - description: - - Define the state of a container. - - If you clone a container using O(clone_name) the newly cloned - container created in a stopped state. - - The running container will be stopped while the clone operation is - happening and upon completion of the clone the original container - state will be restored. 
- type: str - default: started - container_config: - description: - - A list of C(key=value) options to use when configuring a container. - type: list - elements: str + name: + description: + - Name of a container. + type: str + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + - overlayfs + - zfs + description: + - Backend storage type for the container. + type: str + default: dir + template: + description: + - Name of the template to use within an LXC create. + type: str + default: ubuntu + template_options: + description: + - Template options when building the container. + type: str + config: + description: + - Path to the LXC configuration file. + type: path + lv_name: + description: + - Name of the logical volume, defaults to the container name. + - If not specified, it defaults to E(CONTAINER_NAME). + type: str + vg_name: + description: + - If backend store is lvm, specify the name of the volume group. + type: str + default: lxc + thinpool: + description: + - Use LVM thin pool called TP. + type: str + fs_type: + description: + - Create fstype TYPE. + type: str + default: ext4 + fs_size: + description: + - File system Size. + type: str + default: 5G + directory: + description: + - Place rootfs directory under DIR. + type: path + zfs_root: + description: + - Create zfs under given zfsroot. + type: str + container_command: + description: + - Run a command within a container. + type: str + lxc_path: + description: + - Place container under E(PATH). + type: path + container_log: + description: + - Enable a container log for host actions to the container. + type: bool + default: false + container_log_level: + choices: + - Info + - info + - INFO + - Error + - error + - ERROR + - Debug + - debug + - DEBUG + description: + - Set the log level for a container where O(container_log) was set. + type: str + required: false + default: INFO + clone_name: + description: + - Name of the new cloned server. 
+ - This is only used when state is clone. + type: str + clone_snapshot: + description: + - Create a snapshot a container when cloning. + - This is not supported by all container storage backends. + - Enabling this may fail if the backing store does not support snapshots. + type: bool + default: false + archive: + description: + - Create an archive of a container. + - This will create a tarball of the running container. + type: bool + default: false + archive_path: + description: + - Path the save the archived container. + - If the path does not exist the archive method will attempt to create it. + type: path + archive_compression: + choices: + - gzip + - bzip2 + - none + description: + - Type of compression to use when creating an archive of a running container. + type: str + default: gzip + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + - clone + description: + - Define the state of a container. + - If you clone a container using O(clone_name) the newly cloned container created in a stopped state. + - The running container will be stopped while the clone operation is happening and upon completion of the clone the original container state + will be restored. + type: str + default: started + container_config: + description: + - A list of C(key=value) options to use when configuring a container. + type: list + elements: str requirements: - 'lxc >= 2.0 # OS package' - 'python3 >= 3.5 # OS Package' - 'python3-lxc # OS Package' notes: - - Containers must have a unique name. If you attempt to create a container - with a name that already exists in the users namespace the module will - simply return as "unchanged". - - The O(container_command) can be used with any state except V(absent). If - used with state V(stopped) the container will be V(started), the command - executed, and then the container V(stopped) again. 
Likewise if O(state=stopped) - and the container does not exist it will be first created, - V(started), the command executed, and then V(stopped). If you use a "|" - in the variable you can use common script formatting within the variable - itself. The O(container_command) option will always execute as BASH. - When using O(container_command), a log file is created in the C(/tmp/) directory - which contains both C(stdout) and C(stderr) of any command executed. - - If O(archive=true) the system will attempt to create a compressed - tarball of the running container. The O(archive) option supports LVM backed - containers and will create a snapshot of the running container when - creating the archive. - - If your distro does not have a package for C(python3-lxc), which is a - requirement for this module, it can be installed from source at - U(https://github.com/lxc/python3-lxc) or installed via pip using the - package name C(lxc). -''' + - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users namespace the module + will simply return as "unchanged". + - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container will be V(started), the + command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not exist it will be first + created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you can use common script formatting within + the variable itself. The O(container_command) option will always execute as C(bash). When using O(container_command), a log file is created in + the C(/tmp/) directory which contains both C(stdout) and C(stderr) of any command executed. + - If O(archive=true) the system will attempt to create a compressed tarball of the running container. 
The O(archive) option supports LVM backed + containers and will create a snapshot of the running container when creating the archive. + - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed from source at + U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc). +""" EXAMPLES = r""" - name: Create a started container @@ -382,45 +369,45 @@ EXAMPLES = r""" RETURN = r""" lxc_container: - description: container information - returned: success - type: complex - contains: - name: - description: name of the lxc container - returned: success - type: str - sample: test_host - init_pid: - description: pid of the lxc init process - returned: success - type: int - sample: 19786 - interfaces: - description: list of the container's network interfaces - returned: success - type: list - sample: [ "eth0", "lo" ] - ips: - description: list of ips - returned: success - type: list - sample: [ "10.0.3.3" ] - state: - description: resulting state of the container - returned: success - type: str - sample: "running" - archive: - description: resulting state of the container - returned: success, when archive is true - type: str - sample: "/tmp/test-container-config.tar" - clone: - description: if the container was cloned - returned: success, when clone_name is specified - type: bool - sample: true + description: Container information. + returned: success + type: complex + contains: + name: + description: Name of the lxc container. + returned: success + type: str + sample: test_host + init_pid: + description: Pid of the lxc init process. + returned: success + type: int + sample: 19786 + interfaces: + description: List of the container's network interfaces. + returned: success + type: list + sample: ["eth0", "lo"] + ips: + description: List of ips. + returned: success + type: list + sample: ["10.0.3.3"] + state: + description: Resulting state of the container. 
+ returned: success + type: str + sample: "running" + archive: + description: Resulting state of the container. + returned: success, when archive is true + type: str + sample: "/tmp/test-container-config.tar" + clone: + description: If the container was cloned. + returned: success, when clone_name is specified + type: bool + sample: true """ import os diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py index 1f811a7efa..8ece67470b 100644 --- a/plugins/modules/lxca_cmms.py +++ b/plugins/modules/lxca_cmms.py @@ -8,16 +8,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) module: lxca_cmms short_description: Custom module for lxca cmms inventory utility description: - - This module returns/displays a inventory details of cmms - + - This module returns/displays a inventory details of cmms. attributes: check_mode: support: none @@ -26,32 +24,28 @@ attributes: options: uuid: - description: - uuid of device, this is string with length greater than 16. + description: UUID of device, this is string with length greater than 16. type: str command_options: - description: - options to filter nodes information + description: Options to filter nodes information. default: cmms choices: - - cmms - - cmms_by_uuid - - cmms_by_chassis_uuid + - cmms + - cmms_by_uuid + - cmms_by_chassis_uuid type: str chassis: - description: - uuid of chassis, this is string with length greater than 16. + description: UUID of chassis, this is string with length greater than 16. 
type: str extends_documentation_fragment: - community.general.lxca_common - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # get all cmms info - name: Get nodes data from LXCA community.general.lxca_cmms: @@ -76,28 +70,27 @@ EXAMPLES = ''' auth_url: "https://10.243.15.168" chassis: "3C737AA5E31640CE949B10C129A8B01F" command_options: cmms_by_chassis_uuid +""" -''' - -RETURN = r''' +RETURN = r""" result: - description: cmms detail from lxca - returned: success - type: dict - sample: - cmmList: - - machineType: '' - model: '' - type: 'CMM' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + description: Cmms detail from lxca. + returned: success + type: dict + sample: + cmmList: + - machineType: '' + model: '' + type: 'CMM' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties - - machineType: '' - model: '' - type: 'CMM' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + - machineType: '' + model: '' + type: 'CMM' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties # Multiple cmms details -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py index 3b37322edb..f133671114 100644 --- a/plugins/modules/lxca_nodes.py +++ b/plugins/modules/lxca_nodes.py @@ -8,16 +8,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) module: lxca_nodes short_description: Custom module for lxca nodes inventory utility description: - - This module returns/displays a inventory details of nodes - + - This module returns/displays a inventory details of nodes. attributes: check_mode: support: none @@ -26,34 +24,30 @@ attributes: options: uuid: - description: - uuid of device, this is string with length greater than 16. + description: UUID of device, this is string with length greater than 16. 
type: str command_options: - description: - options to filter nodes information + description: Options to filter nodes information. default: nodes choices: - - nodes - - nodes_by_uuid - - nodes_by_chassis_uuid - - nodes_status_managed - - nodes_status_unmanaged + - nodes + - nodes_by_uuid + - nodes_by_chassis_uuid + - nodes_status_managed + - nodes_status_unmanaged type: str chassis: - description: - uuid of chassis, this is string with length greater than 16. + description: UUID of chassis, this is string with length greater than 16. type: str extends_documentation_fragment: - community.general.lxca_common - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # get all nodes info - name: Get nodes data from LXCA community.general.lxca_nodes: @@ -95,28 +89,27 @@ EXAMPLES = ''' login_password: Password auth_url: "https://10.243.15.168" command_options: nodes_status_unmanaged +""" -''' - -RETURN = r''' +RETURN = r""" result: - description: nodes detail from lxca - returned: always - type: dict - sample: - nodeList: - - machineType: '6241' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + description: Nodes detail from lxca. 
+ returned: always + type: dict + sample: + nodeList: + - machineType: '6241' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties - - machineType: '8871' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + - machineType: '8871' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties # Multiple nodes details -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index 5c5d8a4d8d..3f0ab2607e 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_container short_description: Manage LXD instances description: @@ -19,198 +18,180 @@ author: "Hiroaki Nakamura (@hnakamur)" extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - version_added: 6.4.0 - diff_mode: - support: full - version_added: 6.4.0 + check_mode: + support: full + version_added: 6.4.0 + diff_mode: + support: full + version_added: 6.4.0 options: - name: - description: - - Name of an instance. - type: str - required: true - project: - description: - - 'Project of an instance. - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).' - required: false - type: str - version_added: 4.8.0 - architecture: - description: - - 'The architecture for the instance (for example V(x86_64) or V(i686)). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' - type: str - required: false - config: - description: - - 'The config for the instance (for example V({"limits.cpu": "2"})). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' 
- - If the instance already exists and its "config" values in metadata - obtained from the LXD API U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get) - are different, then this module tries to apply the configurations - U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put). - - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true). - type: dict - required: false - ignore_volatile_options: - description: - - If set to V(true), options starting with C(volatile.) are ignored. As a result, - they are reapplied for each execution. - - This default behavior can be changed by setting this option to V(false). - - The default value changed from V(true) to V(false) in community.general 6.0.0. - type: bool - required: false - default: false - version_added: 3.7.0 - profiles: - description: - - Profile to be used by the instance. - type: list - elements: str - devices: - description: - - 'The devices for the instance - (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' - type: dict - required: false - ephemeral: - description: - - Whether or not the instance is ephemeral (for example V(true) or V(false)). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). - required: false - type: bool - source: - description: - - 'The source for the instance - (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/", - "protocol": "simplestreams", "alias": "22.04" })).' - - 'See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.' - - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).' - required: false - type: dict - state: - choices: - - started - - stopped - - restarted - - absent - - frozen - description: - - Define the state of an instance. 
- required: false - default: started - type: str - target: - description: - - For cluster deployments. Will attempt to create an instance on a target node. - If the instance exists elsewhere in a cluster, then it will not be replaced or moved. - The name should respond to same name of the node you see in C(lxc cluster list). - type: str - required: false - version_added: 1.0.0 - timeout: - description: - - A timeout for changing the state of the instance. - - This is also used as a timeout for waiting until IPv4 addresses - are set to the all network interfaces in the instance after - starting or restarting. - required: false - default: 30 - type: int - type: - description: - - Instance type can be either V(virtual-machine) or V(container). - required: false - default: container - choices: - - container - - virtual-machine - type: str - version_added: 4.1.0 - wait_for_ipv4_addresses: - description: - - If this is V(true), the C(lxd_container) waits until IPv4 addresses - are set to the all network interfaces in the instance after - starting or restarting. - required: false - default: false - type: bool - wait_for_container: - description: - - If set to V(true), the tasks will wait till the task reports a - success status when performing container operations. - default: false - type: bool - version_added: 4.4.0 - force_stop: - description: - - If this is V(true), the C(lxd_container) forces to stop the instance - when it stops or restarts the instance. - required: false - default: false - type: bool - url: - description: - - The unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. 
- - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. - - 'You need to set this password on the LXD server before - running this module using the following command: - C(lxc config set core.trust_password ). - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - - If trust_password is set, this module send a request for - authentication before sending any requests. - required: false - type: str + name: + description: + - Name of an instance. + type: str + required: true + project: + description: + - Project of an instance. + - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + required: false + type: str + version_added: 4.8.0 + architecture: + description: + - The architecture for the instance (for example V(x86_64) or V(i686)). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + type: str + required: false + config: + description: + - 'The config for the instance (for example V({"limits.cpu": "2"})). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' + - If the instance already exists and its "config" values in metadata obtained from the LXD API + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put). + - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true). + type: dict + required: false + ignore_volatile_options: + description: + - If set to V(true), options starting with C(volatile.) are ignored. 
As a result, they are reapplied for each execution. + - This default behavior can be changed by setting this option to V(false). + - The default value changed from V(true) to V(false) in community.general 6.0.0. + type: bool + required: false + default: false + version_added: 3.7.0 + profiles: + description: + - Profile to be used by the instance. + type: list + elements: str + devices: + description: + - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' + type: dict + required: false + ephemeral: + description: + - Whether or not the instance is ephemeral (for example V(true) or V(false)). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + required: false + type: bool + source: + description: + - 'The source for the instance (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/", + "protocol": "simplestreams", "alias": "22.04" })).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation. + - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).' + required: false + type: dict + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + description: + - Define the state of an instance. + required: false + default: started + type: str + target: + description: + - For cluster deployments. Will attempt to create an instance on a target node. If the instance exists elsewhere in a cluster, then it will + not be replaced or moved. The name should respond to same name of the node you see in C(lxc cluster list). + type: str + required: false + version_added: 1.0.0 + timeout: + description: + - A timeout for changing the state of the instance. 
+ - This is also used as a timeout for waiting until IPv4 addresses are set to the all network interfaces in the instance after starting or + restarting. + required: false + default: 30 + type: int + type: + description: + - Instance type can be either V(virtual-machine) or V(container). + required: false + default: container + choices: + - container + - virtual-machine + type: str + version_added: 4.1.0 + wait_for_ipv4_addresses: + description: + - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set to the all network interfaces in the instance after starting + or restarting. + required: false + default: false + type: bool + wait_for_container: + description: + - If set to V(true), the tasks will wait till the task reports a success status when performing container operations. + default: false + type: bool + version_added: 4.4.0 + force_stop: + description: + - If this is V(true), the C(lxd_container) forces to stop the instance when it stops or restarts the instance. + required: false + default: false + type: bool + url: + description: + - The unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. 
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config set core.trust_password + ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If trust_password is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - - Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance - with a name that already existed in the users namespace the module will - simply return as "unchanged". - - There are two ways to run commands inside a container or virtual machine, using the command - module or using the ansible lxd connection plugin bundled in Ansible >= - 2.1, the later requires python to be installed in the instance which can - be done with the command module. - - You can copy a file from the host to the instance - with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module - and the P(community.general.lxd#connection) connection plugin. - See the example below. - - You can copy a file in the created instance to the localhost - with C(command=lxc file pull instance_name/dir/filename filename). - See the first example below. - - linuxcontainers.org has phased out LXC/LXD support with March 2024 + - Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance with a name that + already existed in the users namespace the module will simply return as "unchanged". + - There are two ways to run commands inside a container or virtual machine, using the command module or using the ansible lxd connection plugin + bundled in Ansible >= 2.1, the later requires python to be installed in the instance which can be done with the command module. 
+ - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the + P(community.general.lxd#connection) connection plugin. See the example below. + - You can copy a file in the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename). See the first + example below. + - Linuxcontainers.org has phased out LXC/LXD support with March 2024 (U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)). Currently only Ubuntu is still providing images. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a Ubuntu container and install python - hosts: localhost connection: local @@ -279,7 +260,7 @@ EXAMPLES = ''' source: type: image mode: pull - # Provides Ubuntu minimal images + # Provides Ubuntu minimal images server: https://cloud-images.ubuntu.com/minimal/releases/ protocol: simplestreams alias: "22.04" @@ -400,12 +381,12 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: [...] # URL to the image server + server: ['...'] # URL to the image server alias: debian/11 timeout: 600 -''' +""" -RETURN = ''' +RETURN = r""" addresses: description: Mapping from the network device name to a list of IPv4 addresses in the instance. returned: when state is started or restarted @@ -426,7 +407,8 @@ actions: returned: success type: list sample: ["create", "start"] -''' +""" + import copy import datetime import os diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py index 13660fd91d..c46559298b 100644 --- a/plugins/modules/lxd_profile.py +++ b/plugins/modules/lxd_profile.py @@ -9,126 +9,114 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_profile short_description: Manage LXD profiles description: - - Management of LXD profiles + - Management of LXD profiles. 
author: "Hiroaki Nakamura (@hnakamur)" extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of a profile. - required: true - type: str - project: - description: - - 'Project of a profile. - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).' - type: str - required: false - version_added: 4.8.0 + name: description: - description: - - Description of the profile. - type: str - config: - description: - - 'The config for the instance (e.g. {"limits.memory": "4GB"}). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' - - If the profile already exists and its "config" value in metadata - obtained from - GET /1.0/profiles/ - U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get) - are different, then this module tries to apply the configurations - U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put). - - Not all config values are supported to apply the existing profile. - Maybe you need to delete and recreate a profile. - required: false - type: dict - devices: - description: - - 'The devices for the profile - (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' - required: false - type: dict - new_name: - description: - - A new name of a profile. - - If this parameter is specified a profile will be renamed to this name. - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post). - required: false - type: str - merge_profile: - description: - - Merge the configuration of the present profile with the new desired configuration, - instead of replacing it. 
- required: false - default: false - type: bool - version_added: 2.1.0 - state: - choices: - - present - - absent - description: - - Define the state of a profile. - required: false - default: present - type: str - url: - description: - - The unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the LXD server before - running this module using the following command. - lxc config set core.trust_password - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/) - - If trust_password is set, this module send a request for - authentication before sending any requests. - required: false - type: str + - Name of a profile. + required: true + type: str + project: + description: + - Project of a profile. See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + type: str + required: false + version_added: 4.8.0 + description: + description: + - Description of the profile. + type: str + config: + description: + - 'The config for the instance (for example V({"limits.memory": "4GB"})). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' 
+ - If the profile already exists and its C(config) value in metadata obtained from GET /1.0/profiles/ + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put). + - Not all config values are supported to apply the existing profile. Maybe you need to delete and recreate a profile. + required: false + type: dict + devices: + description: + - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' + required: false + type: dict + new_name: + description: + - A new name of a profile. + - If this parameter is specified a profile will be renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post). + required: false + type: str + merge_profile: + description: + - Merge the configuration of the present profile with the new desired configuration, instead of replacing it. + required: false + default: false + type: bool + version_added: 2.1.0 + state: + choices: + - present + - absent + description: + - Define the state of a profile. + required: false + default: present + type: str + url: + description: + - The unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. 
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - You need to set this password on the LXD server before running this module using the following command. lxc config set core.trust_password + See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). + - If trust_password is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - - Profiles must have a unique name. If you attempt to create a profile - with a name that already existed in the users namespace the module will + - Profiles must have a unique name. If you attempt to create a profile with a name that already existed in the users namespace the module will simply return as "unchanged". -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a profile - hosts: localhost connection: local @@ -162,22 +150,22 @@ EXAMPLES = ''' - hosts: localhost connection: local tasks: - - name: Create macvlan profile - community.general.lxd_profile: - url: https://127.0.0.1:8443 - # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" - trust_password: mypassword - name: macvlan - state: present - config: {} - description: my macvlan profile - devices: - eth0: - nictype: macvlan - parent: br0 - type: nic + - name: Create macvlan profile + community.general.lxd_profile: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values. 
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic # An example for modify/merge a profile - hosts: localhost @@ -214,11 +202,11 @@ EXAMPLES = ''' name: macvlan new_name: macvlan2 state: present -''' +""" -RETURN = ''' +RETURN = r""" old_state: - description: The old state of the profile + description: The old state of the profile. returned: success type: str sample: "absent" @@ -232,7 +220,7 @@ actions: returned: success type: list sample: ["create"] -''' +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py index 0d321808a2..ee90b88168 100644 --- a/plugins/modules/lxd_project.py +++ b/plugins/modules/lxd_project.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_project short_description: Manage LXD projects version_added: 4.8.0 @@ -18,98 +17,91 @@ author: "Raymond Chang (@we10710aa)" extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of the project. - required: true - type: str + name: description: - description: - - Description of the project. - type: str - config: - description: - - 'The config for the project (for example V({"features.profiles": "true"})). - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).' 
- - If the project already exists and its "config" value in metadata - obtained from - C(GET /1.0/projects/) - U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get) - are different, then this module tries to apply the configurations - U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put). - type: dict - new_name: - description: - - A new name of a project. - - If this parameter is specified a project will be renamed to this name. - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post). - required: false - type: str - merge_project: - description: - - Merge the configuration of the present project with the new desired configuration, - instead of replacing it. If configuration is the same after merged, no change will be made. - required: false - default: false - type: bool - state: - choices: - - present - - absent - description: - - Define the state of a project. - required: false - default: present - type: str - url: - description: - - The Unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The Unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. 
- - 'You need to set this password on the LXD server before - running this module using the following command: - C(lxc config set core.trust_password ) - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - - If O(trust_password) is set, this module send a request for - authentication before sending any requests. - required: false - type: str + - Name of the project. + required: true + type: str + description: + description: + - Description of the project. + type: str + config: + description: + - 'The config for the project (for example V({"features.profiles": "true"})). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).' + - If the project already exists and its "config" value in metadata obtained from C(GET /1.0/projects/) + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put). + type: dict + new_name: + description: + - A new name of a project. + - If this parameter is specified a project will be renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post). + required: false + type: str + merge_project: + description: + - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If configuration is the same + after merged, no change will be made. + required: false + default: false + type: bool + state: + choices: + - present + - absent + description: + - Define the state of a project. + required: false + default: present + type: str + url: + description: + - The Unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The Unix domain socket path when LXD is installed by snap package manager. 
+ required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config set core.trust_password + ) See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - - Projects must have a unique name. If you attempt to create a project - with a name that already existed in the users namespace the module will + - Projects must have a unique name. If you attempt to create a project with a name that already existed in the users namespace the module will simply return as "unchanged". -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a project - hosts: localhost connection: local @@ -132,9 +124,9 @@ EXAMPLES = ''' state: present config: {} description: my new project -''' +""" -RETURN = ''' +RETURN = r""" old_state: description: The old state of the project. returned: success @@ -184,7 +176,7 @@ actions: type: list elements: str sample: ["create"] -''' +""" from ansible_collections.community.general.plugins.module_utils.lxd import ( LXDClient, LXDClientException, default_key_file, default_cert_file From a99f72fc367410e20ea44df4aa3640448022ce2d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 27 Dec 2024 01:40:05 +1300 Subject: [PATCH 414/482] [ip ... 
j]*.py: normalize docs (#9392) * [ip ... j]*.py: normalize docs * Update plugins/modules/ip_netns.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/ip_netns.py | 51 +++--- plugins/modules/ipa_config.py | 14 +- plugins/modules/ipa_dnsrecord.py | 82 ++++----- plugins/modules/ipa_dnszone.py | 20 +- plugins/modules/ipa_getkeytab.py | 12 +- plugins/modules/ipa_group.py | 110 ++++++----- plugins/modules/ipa_hbacrule.py | 84 +++++---- plugins/modules/ipa_host.py | 60 +++--- plugins/modules/ipa_hostgroup.py | 51 +++--- plugins/modules/ipa_otpconfig.py | 14 +- plugins/modules/ipa_otptoken.py | 63 ++++--- plugins/modules/ipa_pwpolicy.py | 251 +++++++++++++------------- plugins/modules/ipa_role.py | 84 +++++---- plugins/modules/ipa_service.py | 32 ++-- plugins/modules/ipa_subca.py | 26 ++- plugins/modules/ipa_sudocmd.py | 22 +-- plugins/modules/ipa_sudocmdgroup.py | 30 ++- plugins/modules/ipa_sudorule.py | 111 ++++++------ plugins/modules/ipa_user.py | 83 +++++---- plugins/modules/ipa_vault.py | 140 +++++++------- plugins/modules/ipbase_info.py | 29 ++- plugins/modules/ipify_facts.py | 16 +- plugins/modules/ipinfoio_facts.py | 39 ++-- plugins/modules/ipmi_boot.py | 70 ++++--- plugins/modules/ipmi_power.py | 73 ++++---- plugins/modules/iptables_state.py | 75 +++----- plugins/modules/ipwcli_dns.py | 217 +++++++++++----------- plugins/modules/irc.py | 61 +++---- plugins/modules/iso_create.py | 173 +++++++++--------- plugins/modules/iso_customize.py | 36 ++-- plugins/modules/iso_extract.py | 58 +++--- plugins/modules/jabber.py | 27 ++- plugins/modules/java_cert.py | 30 ++- plugins/modules/java_keystore.py | 67 +++---- plugins/modules/jboss.py | 20 +- plugins/modules/jenkins_build.py | 18 +- plugins/modules/jenkins_build_info.py | 18 +- plugins/modules/jenkins_job.py | 25 ++- plugins/modules/jenkins_job_info.py | 34 ++-- plugins/modules/jenkins_node.py | 34 ++-- plugins/modules/jenkins_plugin.py | 76 ++++---- 
plugins/modules/jenkins_script.py | 59 +++--- plugins/modules/jira.py | 124 ++++++------- 43 files changed, 1290 insertions(+), 1429 deletions(-) diff --git a/plugins/modules/ip_netns.py b/plugins/modules/ip_netns.py index 69534c810d..6bcae8e5f2 100644 --- a/plugins/modules/ip_netns.py +++ b/plugins/modules/ip_netns.py @@ -7,37 +7,36 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ip_netns author: "Arie Bregman (@bregman-arie)" short_description: Manage network namespaces -requirements: [ ip ] +requirements: [ip] description: - - Create or delete network namespaces using the ip command. + - Create or delete network namespaces using the C(ip) command. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - required: false - description: - - Name of the namespace - type: str - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the namespace should exist - type: str -''' + name: + required: false + description: + - Name of the namespace. + type: str + state: + required: false + default: "present" + choices: [present, absent] + description: + - Whether the namespace should exist. 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a namespace named mario community.general.ip_netns: name: mario @@ -47,11 +46,11 @@ EXAMPLES = ''' community.general.ip_netns: name: luigi state: absent -''' +""" -RETURN = ''' +RETURN = r""" # Default return values -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_text diff --git a/plugins/modules/ipa_config.py b/plugins/modules/ipa_config.py index 871643fd7b..ea08f8f8ba 100644 --- a/plugins/modules/ipa_config.py +++ b/plugins/modules/ipa_config.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_config author: Fran Fitzpatrick (@fxfitz) short_description: Manage Global FreeIPA Configuration Settings @@ -115,10 +114,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled community.general.ipa_config: ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] @@ -221,14 +219,14 @@ EXAMPLES = r''' ipa_host: localhost ipa_user: admin ipa_pass: supersecret -''' +""" -RETURN = r''' +RETURN = r""" config: description: Configuration as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index 1dad138377..d92e2c4f66 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_dnsrecord author: Abhijeet Kasurde (@Akasurde) short_description: Manage FreeIPA DNS records @@ -23,64 +22,66 @@ attributes: options: zone_name: description: - - The DNS zone name to which DNS record needs to be managed. + - The DNS zone name to which DNS record needs to be managed. required: true type: str record_name: description: - - The DNS record name to manage. + - The DNS record name to manage. required: true aliases: ["name"] type: str record_type: description: - - The type of DNS record name. - - Support for V(NS) was added in comunity.general 8.2.0. - - Support for V(SSHFP) was added in community.general 9.1.0. + - The type of DNS record name. + - Support for V(NS) was added in comunity.general 8.2.0. + - Support for V(SSHFP) was added in community.general 9.1.0. required: false default: 'A' choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP'] type: str record_value: description: - - Manage DNS record name with this value. - - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. - - Use O(record_values) if you need to specify multiple values. - - In the case of V(A) or V(AAAA) record types, this will be the IP address. - - In the case of V(A6) record type, this will be the A6 Record data. - - In the case of V(CNAME) record type, this will be the hostname. - - In the case of V(DNAME) record type, this will be the DNAME target. - - In the case of V(NS) record type, this will be the name server hostname. 
Hostname must already have a valid A or AAAA record. - - In the case of V(PTR) record type, this will be the hostname. - - In the case of V(TXT) record type, this will be a text. - - In the case of V(SRV) record type, this will be a service record. - - In the case of V(MX) record type, this will be a mail exchanger record. - - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. + - Manage DNS record name with this value. + - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. + - Use O(record_values) if you need to specify multiple values. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. + - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. + - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA + record. + - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: str record_values: description: - - Manage DNS record name with this value. - - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. - - In the case of V(A) or V(AAAA) record types, this will be the IP address. - - In the case of V(A6) record type, this will be the A6 Record data. - - In the case of V(CNAME) record type, this will be the hostname. - - In the case of V(DNAME) record type, this will be the DNAME target. - - In the case of V(NS) record type, this will be the name server hostname. 
Hostname must already have a valid A or AAAA record. - - In the case of V(PTR) record type, this will be the hostname. - - In the case of V(TXT) record type, this will be a text. - - In the case of V(SRV) record type, this will be a service record. - - In the case of V(MX) record type, this will be a mail exchanger record. - - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. + - Manage DNS record name with this value. + - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. + - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. + - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA + record. + - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: list elements: str record_ttl: description: - - Set the TTL for the record. - - Applies only when adding a new or changing the value of O(record_value) or O(record_values). + - Set the TTL for the record. + - Applies only when adding a new or changing the value of O(record_value) or O(record_values). required: false type: int state: - description: State to ensure + description: State to ensure. 
required: false default: present choices: ["absent", "present"] @@ -88,10 +89,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure dns record is present community.general.ipa_dnsrecord: ipa_host: spider.example.com @@ -189,14 +189,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: ChangeMe! -''' +""" -RETURN = r''' +RETURN = r""" dnsrecord: description: DNS record as returned by IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py index 6699b0525b..b536c258d2 100644 --- a/plugins/modules/ipa_dnszone.py +++ b/plugins/modules/ipa_dnszone.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_dnszone author: Fran Fitzpatrick (@fxfitz) short_description: Manage FreeIPA DNS Zones description: - - Add and delete an IPA DNS Zones using IPA API + - Add and delete an IPA DNS Zones using IPA API. attributes: check_mode: support: full @@ -23,11 +22,11 @@ attributes: options: zone_name: description: - - The DNS zone name to which needs to be managed. + - The DNS zone name to which needs to be managed. required: true type: str state: - description: State to ensure + description: State to ensure. required: false default: present choices: ["absent", "present"] @@ -44,10 +43,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure dns zone is present community.general.ipa_dnszone: ipa_host: spider.example.com @@ -78,14 +76,14 @@ EXAMPLES = r''' state: present zone_name: example.com allowsyncptr: true -''' +""" -RETURN = r''' +RETURN = r""" zone: description: DNS zone as returned by IPA API. 
returned: always type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py index 643e18cf62..dfd612564b 100644 --- a/plugins/modules/ipa_getkeytab.py +++ b/plugins/modules/ipa_getkeytab.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_getkeytab short_description: Manage keytab file in FreeIPA version_added: 9.5.0 @@ -82,7 +81,8 @@ options: state: description: - The state of the keytab file. - - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set O(force=true). + - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set + O(force=true). type: str default: present choices: ["present", "absent"] @@ -95,9 +95,9 @@ requirements: - Managed host is FreeIPA client extends_documentation_fragment: - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get Kerberos ticket using default principal community.general.krb_ticket: password: "{{ aldpro_admin_password }}" @@ -121,7 +121,7 @@ EXAMPLES = r''' principal: HTTP/freeipa-dc02.ipa.test ipa_host: freeipa-dc01.ipa.test force: true -''' +""" import os diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py index 92470606fc..60077a2c6a 100644 --- a/plugins/modules/ipa_group.py +++ b/plugins/modules/ipa_group.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_group author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA group description: - - Add, modify and delete group within IPA server + - Add, modify and delete group within 
IPA server. attributes: check_mode: support: full @@ -22,77 +21,76 @@ attributes: options: append: description: - - If V(true), add the listed O(user) and O(group) to the group members. - - If V(false), only the listed O(user) and O(group) will be group members, removing any other members. + - If V(true), add the listed O(user) and O(group) to the group members. + - If V(false), only the listed O(user) and O(group) will be group members, removing any other members. default: false type: bool version_added: 4.0.0 cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str description: description: - - Description of the group. + - Description of the group. type: str external: description: - - Allow adding external non-IPA members from trusted domains. + - Allow adding external non-IPA members from trusted domains. type: bool gidnumber: description: - - GID (use this option to set it manually). + - GID (use this option to set it manually). aliases: ['gid'] type: str group: description: - - List of group names assigned to this group. - - If O(append=false) and an empty list is passed all groups will be removed from this group. - - Groups that are already assigned but not passed will be removed. - - If O(append=true) the listed groups will be assigned without removing other groups. - - If option is omitted assigned groups will not be checked or changed. + - List of group names assigned to this group. + - If O(append=false) and an empty list is passed all groups will be removed from this group. + - Groups that are already assigned but not passed will be removed. + - If O(append=true) the listed groups will be assigned without removing other groups. + - If option is omitted assigned groups will not be checked or changed. type: list elements: str nonposix: description: - - Create as a non-POSIX group. 
+ - Create as a non-POSIX group. type: bool user: description: - - List of user names assigned to this group. - - If O(append=false) and an empty list is passed all users will be removed from this group. - - Users that are already assigned but not passed will be removed. - - If O(append=true) the listed users will be assigned without removing other users. - - If option is omitted assigned users will not be checked or changed. + - List of user names assigned to this group. + - If O(append=false) and an empty list is passed all users will be removed from this group. + - Users that are already assigned but not passed will be removed. + - If O(append=true) the listed users will be assigned without removing other users. + - If option is omitted assigned users will not be checked or changed. type: list elements: str external_user: description: - - List of external users assigned to this group. - - Behaves identically to O(user) with respect to O(append) attribute. - - List entries can be in V(DOMAIN\\\\username) or SID format. - - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users. - This is because only SIDs are returned by IPA query. - - O(external=true) is needed for this option to work. + - List of external users assigned to this group. + - Behaves identically to O(user) with respect to O(append) attribute. + - List entries can be in V(DOMAIN\\\\username) or SID format. + - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users. + This is because only SIDs are returned by IPA query. + - O(external=true) is needed for this option to work. type: list elements: str version_added: 6.3.0 state: description: - - State to ensure + - State to ensure. 
default: "present" choices: ["absent", "present"] type: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure group is present community.general.ipa_group: name: oinstall @@ -106,8 +104,8 @@ EXAMPLES = r''' community.general.ipa_group: name: ops group: - - sysops - - appops + - sysops + - appops ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -116,8 +114,8 @@ EXAMPLES = r''' community.general.ipa_group: name: sysops user: - - linus - - larry + - linus + - larry ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -126,7 +124,7 @@ EXAMPLES = r''' community.general.ipa_group: name: developers user: - - john + - john append: true state: present ipa_host: ipa.example.com @@ -135,25 +133,25 @@ EXAMPLES = r''' - name: Add external user to a group community.general.ipa_group: - name: developers - external: true - append: true - external_user: - - S-1-5-21-123-1234-12345-63421 - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret + name: developers + external: true + append: true + external_user: + - S-1-5-21-123-1234-12345-63421 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret - name: Add a user from MYDOMAIN community.general.ipa_group: - name: developers - external: true - append: true - external_user: - - MYDOMAIN\\john - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret + name: developers + external: true + append: true + external_user: + - MYDOMAIN\\john + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret - name: Ensure group is absent community.general.ipa_group: @@ -162,14 +160,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" group: - description: Group as returned by IPA API + description: Group as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py index 77a4d0d487..d168a3a7e0 100644 --- a/plugins/modules/ipa_hbacrule.py +++ b/plugins/modules/ipa_hbacrule.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_hbacrule author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA HBAC rule @@ -22,99 +21,98 @@ attributes: options: cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str description: - description: Description + description: Description. type: str host: description: - - List of host names to assign. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. + - List of host names to assign. + - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. required: false type: list elements: str hostcategory: - description: Host category + description: Host category. choices: ['all'] type: str hostgroup: description: - - List of hostgroup names to assign. - - If an empty list is passed all hostgroups will be removed. from the rule - - If option is omitted hostgroups will not be checked or changed. + - List of hostgroup names to assign. + - If an empty list is passed all hostgroups will be removed from the rule. + - If option is omitted hostgroups will not be checked or changed. type: list elements: str service: description: - - List of service names to assign. - - If an empty list is passed all services will be removed from the rule. - - If option is omitted services will not be checked or changed. + - List of service names to assign. 
+ - If an empty list is passed all services will be removed from the rule. + - If option is omitted services will not be checked or changed. type: list elements: str servicecategory: - description: Service category + description: Service category. choices: ['all'] type: str servicegroup: description: - - List of service group names to assign. - - If an empty list is passed all assigned service groups will be removed from the rule. - - If option is omitted service groups will not be checked or changed. + - List of service group names to assign. + - If an empty list is passed all assigned service groups will be removed from the rule. + - If option is omitted service groups will not be checked or changed. type: list elements: str sourcehost: description: - - List of source host names to assign. - - If an empty list if passed all assigned source hosts will be removed from the rule. - - If option is omitted source hosts will not be checked or changed. + - List of source host names to assign. + - If an empty list if passed all assigned source hosts will be removed from the rule. + - If option is omitted source hosts will not be checked or changed. type: list elements: str sourcehostcategory: - description: Source host category + description: Source host category. choices: ['all'] type: str sourcehostgroup: description: - - List of source host group names to assign. - - If an empty list if passed all assigned source host groups will be removed from the rule. - - If option is omitted source host groups will not be checked or changed. + - List of source host group names to assign. + - If an empty list if passed all assigned source host groups will be removed from the rule. + - If option is omitted source host groups will not be checked or changed. type: list elements: str state: - description: State to ensure + description: State to ensure. 
default: "present" - choices: ["absent", "disabled", "enabled","present"] + choices: ["absent", "disabled", "enabled", "present"] type: str user: description: - - List of user names to assign. - - If an empty list if passed all assigned users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - List of user names to assign. + - If an empty list if passed all assigned users will be removed from the rule. + - If option is omitted users will not be checked or changed. type: list elements: str usercategory: - description: User category + description: User category. choices: ['all'] type: str usergroup: description: - - List of user group names to assign. - - If an empty list if passed all assigned user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. + - List of user group names to assign. + - If an empty list if passed all assigned user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. type: list elements: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure rule to allow all users to access any host from any host community.general.ipa_hbacrule: name: allow_all @@ -132,9 +130,9 @@ EXAMPLES = r''' name: allow_all_developers_access_to_db description: Allow all developers to access any database from any host hostgroup: - - db-server + - db-server usergroup: - - developers + - developers state: present ipa_host: ipa.example.com ipa_user: admin @@ -147,14 +145,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" hbacrule: description: HBAC rule as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py index 791cee91f3..b2f76ac8f3 100644 --- a/plugins/modules/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_host author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA host @@ -22,73 +21,73 @@ attributes: options: fqdn: description: - - Full qualified domain name. - - Can not be changed as it is the unique identifier. + - Full qualified domain name. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str description: description: - - A description of this host. + - A description of this host. type: str force: description: - - Force host name even if not in DNS. + - Force host name even if not in DNS. required: false type: bool ip_address: description: - - Add the host to DNS with this IP address. + - Add the host to DNS with this IP address. type: str mac_address: description: - - List of Hardware MAC address(es) off this host. - - If option is omitted MAC addresses will not be checked or changed. - - If an empty list is passed all assigned MAC addresses will be removed. - - MAC addresses that are already assigned but not passed will be removed. + - List of Hardware MAC address(es) off this host. + - If option is omitted MAC addresses will not be checked or changed. + - If an empty list is passed all assigned MAC addresses will be removed. + - MAC addresses that are already assigned but not passed will be removed. aliases: ["macaddress"] type: list elements: str ns_host_location: description: - - Host location (e.g. "Lab 2") + - Host location (for example V(Lab 2)). aliases: ["nshostlocation"] type: str ns_hardware_platform: description: - - Host hardware platform (e.g. 
"Lenovo T61") + - Host hardware platform (for example V(Lenovo T61")). aliases: ["nshardwareplatform"] type: str ns_os_version: description: - - Host operating system and version (e.g. "Fedora 9") + - Host operating system and version (for example V(Fedora 9)). aliases: ["nsosversion"] type: str user_certificate: description: - - List of Base-64 encoded server certificates. - - If option is omitted certificates will not be checked or changed. - - If an empty list is passed all assigned certificates will be removed. - - Certificates already assigned but not passed will be removed. + - List of Base-64 encoded server certificates. + - If option is omitted certificates will not be checked or changed. + - If an empty list is passed all assigned certificates will be removed. + - Certificates already assigned but not passed will be removed. aliases: ["usercertificate"] type: list elements: str state: description: - - State to ensure. + - State to ensure. default: present choices: ["absent", "disabled", "enabled", "present"] type: str force_creation: description: - - Create host if O(state=disabled) or O(state=enabled) but not present. + - Create host if O(state=disabled) or O(state=enabled) but not present. default: true type: bool version_added: 9.5.0 update_dns: description: - - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. - - This option has no effect for states other than "absent". + - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. + - This option has no effect for states other than V(absent). type: bool random_password: description: Generate a random password to be used in bulk enrollment. 
@@ -96,10 +95,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure host is present community.general.ipa_host: name: host01.example.com @@ -109,8 +107,8 @@ EXAMPLES = r''' ns_os_version: CentOS 7 ns_hardware_platform: Lenovo T61 mac_address: - - "08:00:27:E3:B1:2D" - - "52:54:00:BD:97:1E" + - "08:00:27:E3:B1:2D" + - "52:54:00:BD:97:1E" state: present ipa_host: ipa.example.com ipa_user: admin @@ -159,18 +157,18 @@ EXAMPLES = r''' ipa_user: admin ipa_pass: topsecret update_dns: true -''' +""" -RETURN = r''' +RETURN = r""" host: description: Host as returned by IPA API. returned: always type: dict host_diff: - description: List of options that differ and would be changed + description: List of options that differ and would be changed. returned: if check mode and a difference is found type: list -''' +""" import traceback diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py index 9e6abf32aa..c1e7d3ad56 100644 --- a/plugins/modules/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_hostgroup author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA host-group @@ -22,61 +21,61 @@ attributes: options: append: description: - - If V(true), add the listed O(host) to the O(hostgroup). - - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts. + - If V(true), add the listed O(host) to the O(hostgroup). + - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts. default: false type: bool version_added: 6.6.0 cn: description: - - Name of host-group. - - Can not be changed as it is the unique identifier. + - Name of host-group. + - Can not be changed as it is the unique identifier. 
required: true aliases: ["name"] type: str description: description: - - Description. + - Description. type: str host: description: - - List of hosts that belong to the host-group. - - If an empty list is passed all hosts will be removed from the group. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the group. + - List of hosts that belong to the host-group. + - If an empty list is passed all hosts will be removed from the group. + - If option is omitted hosts will not be checked or changed. + - If option is passed all assigned hosts that are not passed will be unassigned from the group. type: list elements: str hostgroup: description: - - List of host-groups than belong to that host-group. - - If an empty list is passed all host-groups will be removed from the group. - - If option is omitted host-groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. + - List of host-groups than belong to that host-group. + - If an empty list is passed all host-groups will be removed from the group. + - If option is omitted host-groups will not be checked or changed. + - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. type: list elements: str state: description: - - State to ensure. - - V("absent") and V("disabled") give the same results. - - V("present") and V("enabled") give the same results. + - State to ensure. + - V("absent") and V("disabled") give the same results. + - V("present") and V("enabled") give the same results. 
default: "present" choices: ["absent", "disabled", "enabled", "present"] type: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure host-group databases is present community.general.ipa_hostgroup: name: databases state: present host: - - db.example.com + - db.example.com hostgroup: - - mysql-server - - oracle-server + - mysql-server + - oracle-server ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -88,14 +87,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" hostgroup: description: Hostgroup as returned by IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py index e2d8f0cd52..3c07c7eda3 100644 --- a/plugins/modules/ipa_otpconfig.py +++ b/plugins/modules/ipa_otpconfig.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_otpconfig author: justchris1 (@justchris1) short_description: Manage FreeIPA OTP Configuration Settings @@ -41,10 +40,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure the TOTP authentication window is set to 300 seconds community.general.ipa_otpconfig: ipatokentotpauthwindow: '300' @@ -72,14 +70,14 @@ EXAMPLES = r''' ipa_host: localhost ipa_user: admin ipa_pass: supersecret -''' +""" -RETURN = r''' +RETURN = r""" otpconfig: description: OTP configuration as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py index d8a5b3cf1d..5aba671edf 100644 --- a/plugins/modules/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -7,8 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_otptoken author: justchris1 (@justchris1) short_description: Manage FreeIPA OTPs @@ -31,21 +30,21 @@ options: type: str otptype: description: - - Type of OTP. - - "B(Note:) Cannot be modified after OTP is created." + - Type of OTP. + - B(Note:) Cannot be modified after OTP is created. type: str - choices: [ totp, hotp ] + choices: [totp, hotp] secretkey: description: - - Token secret (Base64). - - If OTP is created and this is not specified, a random secret will be generated by IPA. - - "B(Note:) Cannot be modified after OTP is created." + - Token secret (Base64). + - If OTP is created and this is not specified, a random secret will be generated by IPA. + - B(Note:) Cannot be modified after OTP is created. type: str description: description: Description of the token (informational only). type: str owner: - description: Assigned user of the token. + description: Assigned user of the token. type: str enabled: description: Mark the token as enabled (default V(true)). @@ -53,15 +52,15 @@ options: type: bool notbefore: description: - - First date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. + - First date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. type: str notafter: description: - - Last date/time the token can be used. - - In the format C(YYYYMMddHHmmss). 
- - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. + - Last date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. type: str vendor: description: Token vendor name (informational only). @@ -79,37 +78,37 @@ options: type: str algorithm: description: - - Token hash algorithm. - - "B(Note:) Cannot be modified after OTP is created." + - Token hash algorithm. + - B(Note:) Cannot be modified after OTP is created. choices: ['sha1', 'sha256', 'sha384', 'sha512'] type: str digits: description: - - Number of digits each token code will have. - - "B(Note:) Cannot be modified after OTP is created." - choices: [ 6, 8 ] + - Number of digits each token code will have. + - B(Note:) Cannot be modified after OTP is created. + choices: [6, 8] type: int offset: description: - - TOTP token / IPA server time difference. - - "B(Note:) Cannot be modified after OTP is created." + - TOTP token / IPA server time difference. + - B(Note:) Cannot be modified after OTP is created. type: int interval: description: - - Length of TOTP token code validity in seconds. - - "B(Note:) Cannot be modified after OTP is created." + - Length of TOTP token code validity in seconds. + - B(Note:) Cannot be modified after OTP is created. type: int counter: description: - - Initial counter for the HOTP token. - - "B(Note:) Cannot be modified after OTP is created." + - Initial counter for the HOTP token. + - B(Note:) Cannot be modified after OTP is created. 
type: int extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a totp for pinky, allowing the IPA server to generate using defaults community.general.ipa_otptoken: uniqueid: Token123 @@ -161,14 +160,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" otptoken: - description: OTP Token as returned by IPA API + description: OTP Token as returned by IPA API. returned: always type: dict -''' +""" import base64 import traceback diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py index ba7d702916..5b41651e09 100644 --- a/plugins/modules/ipa_pwpolicy.py +++ b/plugins/modules/ipa_pwpolicy.py @@ -7,152 +7,153 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_pwpolicy author: Adralioh (@adralioh) short_description: Manage FreeIPA password policies description: -- Add, modify, or delete a password policy using the IPA API. + - Add, modify, or delete a password policy using the IPA API. version_added: 2.0.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - group: - description: - - Name of the group that the policy applies to. - - If omitted, the global policy is used. - aliases: ["name"] - type: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - maxpwdlife: - description: Maximum password lifetime (in days). - type: str - minpwdlife: - description: Minimum password lifetime (in hours). - type: str - historylength: - description: - - Number of previous passwords that are remembered. - - Users cannot reuse remembered passwords. - type: str - minclasses: - description: Minimum number of character classes. 
- type: str - minlength: - description: Minimum password length. - type: str - priority: - description: - - Priority of the policy. - - High number means lower priority. - - Required when C(cn) is not the global policy. - type: str - maxfailcount: - description: Maximum number of consecutive failures before lockout. - type: str - failinterval: - description: Period (in seconds) after which the number of failed login attempts is reset. - type: str - lockouttime: - description: Period (in seconds) for which users are locked out. - type: str - gracelimit: - description: Maximum number of LDAP logins after password expiration. - type: int - version_added: 8.2.0 - maxrepeat: - description: Maximum number of allowed same consecutive characters in the new password. - type: int - version_added: 8.2.0 - maxsequence: - description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of length 5 is V(12345). - type: int - version_added: 8.2.0 - dictcheck: - description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib). - type: bool - version_added: 8.2.0 - usercheck: - description: Check whether the password (with possible modifications) contains the user name in some form (if the name has > 3 characters). - type: bool - version_added: 8.2.0 + group: + description: + - Name of the group that the policy applies to. + - If omitted, the global policy is used. + aliases: ["name"] + type: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + maxpwdlife: + description: Maximum password lifetime (in days). + type: str + minpwdlife: + description: Minimum password lifetime (in hours). + type: str + historylength: + description: + - Number of previous passwords that are remembered. + - Users cannot reuse remembered passwords. + type: str + minclasses: + description: Minimum number of character classes. 
+ type: str + minlength: + description: Minimum password length. + type: str + priority: + description: + - Priority of the policy. + - High number means lower priority. + - Required when C(cn) is not the global policy. + type: str + maxfailcount: + description: Maximum number of consecutive failures before lockout. + type: str + failinterval: + description: Period (in seconds) after which the number of failed login attempts is reset. + type: str + lockouttime: + description: Period (in seconds) for which users are locked out. + type: str + gracelimit: + description: Maximum number of LDAP logins after password expiration. + type: int + version_added: 8.2.0 + maxrepeat: + description: Maximum number of allowed same consecutive characters in the new password. + type: int + version_added: 8.2.0 + maxsequence: + description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of + length 5 is V(12345). + type: int + version_added: 8.2.0 + dictcheck: + description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib). + type: bool + version_added: 8.2.0 + usercheck: + description: Check whether the password (with possible modifications) contains the user name in some form (if the name + has > 3 characters). 
+ type: bool + version_added: 8.2.0 extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Modify the global password policy community.general.ipa_pwpolicy: - maxpwdlife: '90' - minpwdlife: '1' - historylength: '8' - minclasses: '3' - minlength: '16' - maxfailcount: '6' - failinterval: '60' - lockouttime: '600' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret + maxpwdlife: '90' + minpwdlife: '1' + historylength: '8' + minclasses: '3' + minlength: '16' + maxfailcount: '6' + failinterval: '60' + lockouttime: '600' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret - name: Ensure the password policy for the group admins is present community.general.ipa_pwpolicy: - group: admins - state: present - maxpwdlife: '60' - minpwdlife: '24' - historylength: '16' - minclasses: '4' - priority: '10' - minlength: '6' - maxfailcount: '4' - failinterval: '600' - lockouttime: '1200' - gracelimit: 3 - maxrepeat: 3 - maxsequence: 3 - dictcheck: true - usercheck: true - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret + group: admins + state: present + maxpwdlife: '60' + minpwdlife: '24' + historylength: '16' + minclasses: '4' + priority: '10' + minlength: '6' + maxfailcount: '4' + failinterval: '600' + lockouttime: '1200' + gracelimit: 3 + maxrepeat: 3 + maxsequence: 3 + dictcheck: true + usercheck: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret - name: Ensure that the group sysops does not have a unique password policy community.general.ipa_pwpolicy: - group: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' + group: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" -RETURN = r''' +RETURN = r""" pwpolicy: - description: Password policy as returned by IPA API. 
- returned: always - type: dict - sample: - cn: ['admins'] - cospriority: ['10'] - dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' - krbmaxpwdlife: ['60'] - krbminpwdlife: ['24'] - krbpwdfailurecountinterval: ['600'] - krbpwdhistorylength: ['16'] - krbpwdlockoutduration: ['1200'] - krbpwdmaxfailure: ['4'] - krbpwdmindiffchars: ['4'] - objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] -''' + description: Password policy as returned by IPA API. + returned: always + type: dict + sample: + cn: ['admins'] + cospriority: ['10'] + dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' + krbmaxpwdlife: ['60'] + krbminpwdlife: ['24'] + krbpwdfailurecountinterval: ['600'] + krbpwdhistorylength: ['16'] + krbpwdlockoutduration: ['1200'] + krbpwdmaxfailure: ['4'] + krbpwdmindiffchars: ['4'] + objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] +""" import traceback diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py index fce315b662..e77b732cb2 100644 --- a/plugins/modules/ipa_role.py +++ b/plugins/modules/ipa_role.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_role author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA role description: -- Add, modify and delete a role within FreeIPA server using FreeIPA API. + - Add, modify and delete a role within FreeIPA server using FreeIPA API. attributes: check_mode: support: full @@ -22,53 +21,53 @@ attributes: options: cn: description: - - Role name. - - Can not be changed as it is the unique identifier. + - Role name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str description: description: - - A description of this role-group. + - A description of this role-group. type: str group: description: - - List of group names assign to this role. 
- - If an empty list is passed all assigned groups will be unassigned from the role. - - If option is omitted groups will not be checked or changed. - - If option is passed all assigned groups that are not passed will be unassigned from the role. + - List of group names assign to this role. + - If an empty list is passed all assigned groups will be unassigned from the role. + - If option is omitted groups will not be checked or changed. + - If option is passed all assigned groups that are not passed will be unassigned from the role. type: list elements: str host: description: - - List of host names to assign. - - If an empty list is passed all assigned hosts will be unassigned from the role. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the role. + - List of host names to assign. + - If an empty list is passed all assigned hosts will be unassigned from the role. + - If option is omitted hosts will not be checked or changed. + - If option is passed all assigned hosts that are not passed will be unassigned from the role. type: list elements: str hostgroup: description: - - List of host group names to assign. - - If an empty list is passed all assigned host groups will be removed from the role. - - If option is omitted host groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. + - List of host group names to assign. + - If an empty list is passed all assigned host groups will be removed from the role. + - If option is omitted host groups will not be checked or changed. + - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. type: list elements: str privilege: description: - - List of privileges granted to the role. - - If an empty list is passed all assigned privileges will be removed. 
- - If option is omitted privileges will not be checked or changed. - - If option is passed all assigned privileges that are not passed will be removed. + - List of privileges granted to the role. + - If an empty list is passed all assigned privileges will be removed. + - If option is omitted privileges will not be checked or changed. + - If option is passed all assigned privileges that are not passed will be removed. type: list elements: str service: description: - - List of service names to assign. - - If an empty list is passed all assigned services will be removed from the role. - - If option is omitted services will not be checked or changed. - - If option is passed all assigned services that are not passed will be removed from the role. + - List of service names to assign. + - If an empty list is passed all assigned services will be removed from the role. + - If option is omitted services will not be checked or changed. + - If option is passed all assigned services that are not passed will be removed from the role. type: list elements: str state: @@ -78,26 +77,25 @@ options: type: str user: description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the role. - - If option is omitted users will not be checked or changed. + - List of user names to assign. + - If an empty list is passed all assigned users will be removed from the role. + - If option is omitted users will not be checked or changed. 
type: list elements: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure role is present community.general.ipa_role: name: dba description: Database Administrators state: present user: - - pinky - - brain + - pinky + - brain ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -107,16 +105,16 @@ EXAMPLES = r''' name: another-role description: Just another role group: - - editors + - editors host: - - host01.example.com + - host01.example.com hostgroup: - - hostgroup01 + - hostgroup01 privilege: - - Group Administrators - - User Administrators + - Group Administrators + - User Administrators service: - - service01 + - service01 - name: Ensure role is absent community.general.ipa_role: @@ -125,14 +123,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" role: description: Role as returned by IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py index d9541674f2..54c5575950 100644 --- a/plugins/modules/ipa_service.py +++ b/plugins/modules/ipa_service.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_service author: Cédric Parent (@cprh) short_description: Manage FreeIPA service description: -- Add and delete an IPA service using IPA API. + - Add and delete an IPA service using IPA API. attributes: check_mode: support: full @@ -22,26 +21,26 @@ attributes: options: krbcanonicalname: description: - - Principal of the service. - - Can not be changed as it is the unique identifier. + - Principal of the service. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str hosts: description: - - Defines the list of 'ManagedBy' hosts. 
+ - Defines the list of C(ManagedBy) hosts. required: false type: list elements: str force: description: - - Force principal name even if host is not in DNS. + - Force principal name even if host is not in DNS. required: false type: bool skip_host_check: description: - - Force service to be created even when host object does not exist to manage it. - - This is only used on creation, not for updating existing services. + - Force service to be created even when host object does not exist to manage it. + - This is only used on creation, not for updating existing services. required: false type: bool default: false @@ -55,10 +54,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure service is present community.general.ipa_service: name: http/host01.example.com @@ -79,19 +77,19 @@ EXAMPLES = r''' community.general.ipa_service: name: http/host01.example.com hosts: - - host01.example.com - - host02.example.com + - host01.example.com + - host02.example.com ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" service: description: Service as returned by IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_subca.py b/plugins/modules/ipa_subca.py index 882b1ac396..ddb551689d 100644 --- a/plugins/modules/ipa_subca.py +++ b/plugins/modules/ipa_subca.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_subca author: Abhijeet Kasurde (@Akasurde) short_description: Manage FreeIPA Lightweight Sub Certificate Authorities description: -- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. + - Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. 
attributes: check_mode: support: full @@ -22,23 +21,23 @@ attributes: options: subca_name: description: - - The Sub Certificate Authority name which needs to be managed. + - The Sub Certificate Authority name which needs to be managed. required: true aliases: ["name"] type: str subca_subject: description: - - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'. + - The Sub Certificate Authority's Subject, for example V(CN=SampleSubCA1,O=testrelm.test). required: true type: str subca_desc: description: - - The Sub Certificate Authority's description. + - The Sub Certificate Authority's description. type: str state: description: - - State to ensure. - - State 'disable' and 'enable' is available for FreeIPA 4.4.2 version and onwards. + - State to ensure. + - States V(disable) and V(enable) are available for FreeIPA 4.4.2 version and onwards. required: false default: present choices: ["absent", "disabled", "enabled", "present"] @@ -46,10 +45,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure IPA Sub CA is present community.general.ipa_subca: ipa_host: spider.example.com @@ -72,14 +70,14 @@ EXAMPLES = ''' ipa_pass: Passw0rd! state: disable subca_name: AnsibleSubCA1 -''' +""" -RETURN = r''' +RETURN = r""" subca: description: IPA Sub CA record as returned by IPA API. 
returned: always type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec diff --git a/plugins/modules/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py index d3139ba1c3..f52d3e9e6d 100644 --- a/plugins/modules/ipa_sudocmd.py +++ b/plugins/modules/ipa_sudocmd.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudocmd author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo command description: -- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. + - Add, modify or delete sudo command within FreeIPA server using FreeIPA API. attributes: check_mode: support: full @@ -22,13 +21,13 @@ attributes: options: sudocmd: description: - - Sudo command. + - Sudo command. aliases: ['name'] required: true type: str description: description: - - A description of this command. + - A description of this command. type: str state: description: State to ensure. @@ -38,10 +37,9 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure sudo command exists community.general.ipa_sudocmd: name: su @@ -57,14 +55,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudocmd: - description: Sudo command as return from IPA API + description: Sudo command as return from IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py index a768e74a1a..c7ab798f4c 100644 --- a/plugins/modules/ipa_sudocmdgroup.py +++ b/plugins/modules/ipa_sudocmdgroup.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudocmdgroup author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo command group description: -- Add, modify or delete sudo command group within IPA server using IPA API. + - Add, modify or delete sudo command group within IPA server using IPA API. attributes: check_mode: support: full @@ -22,13 +21,13 @@ attributes: options: cn: description: - - Sudo Command Group. + - Sudo Command Group. aliases: ['name'] required: true type: str description: description: - - Group description. + - Group description. type: str state: description: State to ensure. @@ -37,24 +36,23 @@ options: type: str sudocmd: description: - - List of sudo commands to assign to the group. - - If an empty list is passed all assigned commands will be removed from the group. - - If option is omitted sudo commands will not be checked or changed. + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands will be removed from the group. + - If option is omitted sudo commands will not be checked or changed. 
type: list elements: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure sudo command group exists community.general.ipa_sudocmdgroup: name: group01 description: Group of important commands sudocmd: - - su + - su ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -66,14 +64,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudocmdgroup: - description: Sudo command group as returned by IPA API + description: Sudo command group as returned by IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py index 223f6b6de7..1670a52035 100644 --- a/plugins/modules/ipa_sudorule.py +++ b/plugins/modules/ipa_sudorule.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudorule author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo rule description: -- Add, modify or delete sudo rule within IPA server using IPA API. + - Add, modify or delete sudo rule within IPA server using IPA API. attributes: check_mode: support: full @@ -22,83 +21,83 @@ attributes: options: cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str cmdcategory: description: - - Command category the rule applies to. + - Command category the rule applies to. choices: ['all'] type: str cmd: description: - - List of commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. + - List of commands assigned to the rule. 
+ - If an empty list is passed all commands will be removed from the rule. + - If option is omitted commands will not be checked or changed. type: list elements: str cmdgroup: description: - - List of command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. + - List of command groups assigned to the rule. + - If an empty list is passed all command groups will be removed from the rule. + - If option is omitted command groups will not be checked or changed. type: list elements: str version_added: 2.0.0 deny_cmd: description: - - List of denied commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. + - List of denied commands assigned to the rule. + - If an empty list is passed all commands will be removed from the rule. + - If option is omitted commands will not be checked or changed. type: list elements: str version_added: 8.1.0 deny_cmdgroup: description: - - List of denied command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. + - List of denied command groups assigned to the rule. + - If an empty list is passed all command groups will be removed from the rule. + - If option is omitted command groups will not be checked or changed. type: list elements: str version_added: 8.1.0 description: description: - - Description of the sudo rule. + - Description of the sudo rule. type: str host: description: - - List of hosts assigned to the rule. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. - - Option O(hostcategory) must be omitted to assign hosts. + - List of hosts assigned to the rule. 
+ - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. + - Option O(hostcategory) must be omitted to assign hosts. type: list elements: str hostcategory: description: - - Host category the rule applies to. - - If V(all) is passed one must omit O(host) and O(hostgroup). - - Option O(host) and O(hostgroup) must be omitted to assign V(all). + - Host category the rule applies to. + - If V(all) is passed one must omit O(host) and O(hostgroup). + - Option O(host) and O(hostgroup) must be omitted to assign V(all). choices: ['all'] type: str hostgroup: description: - - List of host groups assigned to the rule. - - If an empty list is passed all host groups will be removed from the rule. - - If option is omitted host groups will not be checked or changed. - - Option O(hostcategory) must be omitted to assign host groups. + - List of host groups assigned to the rule. + - If an empty list is passed all host groups will be removed from the rule. + - If option is omitted host groups will not be checked or changed. + - Option O(hostcategory) must be omitted to assign host groups. type: list elements: str runasextusers: description: - - List of external RunAs users + - List of external RunAs users. type: list elements: str version_added: 2.3.0 runasusercategory: description: - - RunAs User category the rule applies to. + - RunAs User category the rule applies to. choices: ['all'] type: str runasgroupcategory: @@ -113,21 +112,21 @@ options: elements: str user: description: - - List of users assigned to the rule. - - If an empty list is passed all users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - List of users assigned to the rule. + - If an empty list is passed all users will be removed from the rule. + - If option is omitted users will not be checked or changed. 
type: list elements: str usercategory: description: - - User category the rule applies to. + - User category the rule applies to. choices: ['all'] type: str usergroup: description: - - List of user groups assigned to the rule. - - If an empty list is passed all user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. + - List of user groups assigned to the rule. + - If an empty list is passed all user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. type: list elements: str state: @@ -138,18 +137,18 @@ options: extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' -- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked for a password. +EXAMPLES = r""" +- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked + for a password. community.general.ipa_sudorule: name: sudo_all_nopasswd cmdcategory: all description: Allow to run every command with sudo without password hostcategory: all sudoopt: - - '!authenticate' + - '!authenticate' usercategory: all ipa_host: ipa.example.com ipa_user: admin @@ -161,13 +160,13 @@ EXAMPLES = r''' description: Allow developers to run every command with sudo on all database server cmdcategory: all host: - - db01.example.com + - db01.example.com hostgroup: - - db-server + - db-server sudoopt: - - '!authenticate' + - '!authenticate' usergroup: - - developers + - developers ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -177,25 +176,25 @@ EXAMPLES = r''' name: sudo_operations_all description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root. 
cmdgroup: - - operations-cmdgroup + - operations-cmdgroup hostcategory: all runasextusers: - - root + - root sudoopt: - - '!authenticate' + - '!authenticate' usergroup: - - operators + - operators ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudorule: - description: Sudorule as returned by IPA + description: Sudorule as returned by IPA. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py index e8a1858d0b..039cf3cb70 100644 --- a/plugins/modules/ipa_user.py +++ b/plugins/modules/ipa_user.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_user author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA users description: -- Add, modify and delete user within IPA server. + - Add, modify and delete user within IPA server. attributes: check_mode: support: full @@ -25,46 +24,46 @@ options: type: str update_password: description: - - Set password for a user. + - Set password for a user. type: str default: 'always' - choices: [ always, on_create ] + choices: [always, on_create] givenname: description: - - First name. - - If user does not exist and O(state=present), the usage of O(givenname) is required. + - First name. + - If user does not exist and O(state=present), the usage of O(givenname) is required. type: str krbpasswordexpiration: description: - - Date at which the user password will expire. - - In the format YYYYMMddHHmmss. - - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22. + - Date at which the user password will expire. + - In the format YYYYMMddHHmmss. + - For example V(20180121182022) will expire on 21 January 2018 at 18:20:22. type: str loginshell: description: Login shell. type: str mail: description: - - List of mail addresses assigned to the user. 
- - If an empty list is passed all assigned email addresses will be deleted. - - If None is passed email addresses will not be checked or changed. + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses will be deleted. + - If None is passed email addresses will not be checked or changed. type: list elements: str password: description: - - Password for a user. - - Will not be set for an existing user unless O(update_password=always), which is the default. + - Password for a user. + - Will not be set for an existing user unless O(update_password=always), which is the default. type: str sn: description: - - Surname. - - If user does not exist and O(state=present), the usage of O(sn) is required. + - Surname. + - If user does not exist and O(state=present), the usage of O(sn) is required. type: str sshpubkey: description: - - List of public SSH key. - - If an empty list is passed all assigned public keys will be deleted. - - If None is passed SSH public keys will not be checked or changed. + - List of public SSH key. + - If an empty list is passed all assigned public keys will be deleted. + - If None is passed SSH public keys will not be checked or changed. type: list elements: str state: @@ -74,37 +73,37 @@ options: type: str telephonenumber: description: - - List of telephone numbers assigned to the user. - - If an empty list is passed all assigned telephone numbers will be deleted. - - If None is passed telephone numbers will not be checked or changed. + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers will be deleted. + - If None is passed telephone numbers will not be checked or changed. type: list elements: str title: description: Title. type: str uid: - description: uid of the user. + description: Uid of the user. required: true aliases: ["name"] type: str uidnumber: description: - - Account Settings UID/Posix User ID number. 
+ - Account Settings UID/Posix User ID number. type: str gidnumber: description: - - Posix Group ID. + - Posix Group ID. type: str homedirectory: description: - - Default home directory of the user. + - Default home directory of the user. type: str version_added: '0.2.0' userauthtype: description: - - The authentication type to use for the user. - - To remove all authentication types from the user, use an empty list V([]). - - The choice V(idp) and V(passkey) has been added in community.general 8.1.0. + - The authentication type to use for the user. + - To remove all authentication types from the user, use an empty list V([]). + - The choice V(idp) and V(passkey) has been added in community.general 8.1.0. choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"] type: list elements: str @@ -114,11 +113,11 @@ extends_documentation_fragment: - community.general.attributes requirements: -- base64 -- hashlib -''' + - base64 + - hashlib +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure pinky is present and always reset password community.general.ipa_user: name: pinky @@ -127,12 +126,12 @@ EXAMPLES = r''' givenname: Pinky sn: Acme mail: - - pinky@acme.com + - pinky@acme.com telephonenumber: - - '+555123456' + - '+555123456' sshpubkey: - - ssh-rsa .... - - ssh-dsa .... + - ssh-rsa .... + - ssh-dsa .... uidnumber: '1001' gidnumber: '100' homedirectory: /home/pinky @@ -170,14 +169,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" user: - description: User as returned by IPA API + description: User as returned by IPA API. 
returned: always type: dict -''' +""" import base64 import hashlib diff --git a/plugins/modules/ipa_vault.py b/plugins/modules/ipa_vault.py index 88947e470e..23002b7ce0 100644 --- a/plugins/modules/ipa_vault.py +++ b/plugins/modules/ipa_vault.py @@ -7,84 +7,82 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_vault author: Juan Manuel Parrilla (@jparrill) short_description: Manage FreeIPA vaults description: -- Add, modify and delete vaults and secret vaults. -- KRA service should be enabled to use this module. + - Add, modify and delete vaults and secret vaults. + - KRA service should be enabled to use this module. attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - cn: - description: - - Vault name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str + cn: description: - description: - - Description. - type: str - ipavaulttype: - description: - - Vault types are based on security level. - default: "symmetric" - choices: ["asymmetric", "standard", "symmetric"] - aliases: ["vault_type"] - type: str - ipavaultpublickey: - description: - - Public key. - aliases: ["vault_public_key"] - type: str - ipavaultsalt: - description: - - Vault Salt. - aliases: ["vault_salt"] - type: str - username: - description: - - Any user can own one or more user vaults. - - Mutually exclusive with service. - aliases: ["user"] - type: list - elements: str - service: - description: - - Any service can own one or more service vaults. - - Mutually exclusive with user. - type: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - replace: - description: - - Force replace the existent vault on IPA server. 
- type: bool - default: false - choices: ["True", "False"] - validate_certs: - description: - - Validate IPA server certificates. - type: bool - default: true + - Vault name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + ipavaulttype: + description: + - Vault types are based on security level. + default: "symmetric" + choices: ["asymmetric", "standard", "symmetric"] + aliases: ["vault_type"] + type: str + ipavaultpublickey: + description: + - Public key. + aliases: ["vault_public_key"] + type: str + ipavaultsalt: + description: + - Vault Salt. + aliases: ["vault_salt"] + type: str + username: + description: + - Any user can own one or more user vaults. + - Mutually exclusive with O(service). + aliases: ["user"] + type: list + elements: str + service: + description: + - Any service can own one or more service vaults. + - Mutually exclusive with O(user). + type: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + replace: + description: + - Force replace the existent vault on IPA server. + type: bool + default: false + choices: ["True", "False"] + validate_certs: + description: + - Validate IPA server certificates. + type: bool + default: true extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure vault is present community.general.ipa_vault: name: vault01 @@ -128,14 +126,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" vault: - description: Vault as returned by IPA API + description: Vault as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py index c6a5511b73..3c7d3d26c1 100644 --- a/plugins/modules/ipbase_info.py +++ b/plugins/modules/ipbase_info.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: "ipbase_info" version_added: "7.0.0" short_description: "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API" description: - - "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API" + - Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API. author: "Dominik Kukacka (@dominikkukacka)" extends_documentation_fragment: - "community.general.attributes" @@ -22,31 +21,31 @@ extends_documentation_fragment: options: ip: description: - - "The IP you want to get the info for. If not specified the API will detect the IP automatically." + - The IP you want to get the info for. If not specified the API will detect the IP automatically. required: false type: str apikey: description: - - "The API key for the request if you need more requests." + - The API key for the request if you need more requests. required: false type: str hostname: description: - - "If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP." + - If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP. required: false type: bool default: false language: description: - - "An ISO Alpha 2 Language Code for localizing the IP data" + - An ISO Alpha 2 Language Code for localizing the IP data. required: false type: str default: "en" notes: - - "Check U(https://ipbase.com/) for more information." -''' + - Check U(https://ipbase.com/) for more information. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Get IP geolocation information of the primary outgoing IP" community.general.ipbase_info: register: my_ip_info @@ -64,12 +63,12 @@ EXAMPLES = ''' hostname: true language: "de" register: my_ip_info +""" -''' - -RETURN = ''' +RETURN = r""" data: - description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure of the response." + description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure + of the response." returned: success type: dict sample: { @@ -213,7 +212,7 @@ data: ] } } -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py index ff17d7e543..7767c8d0ff 100644 --- a/plugins/modules/ipify_facts.py +++ b/plugins/modules/ipify_facts.py @@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipify_facts short_description: Retrieve the public IP of your internet gateway description: - If behind NAT and need to know the public IP of your internet gateway. author: -- René Moser (@resmo) + - René Moser (@resmo) extends_documentation_fragment: - community.general.attributes - community.general.attributes.facts @@ -40,9 +39,9 @@ options: default: true notes: - Visit https://www.ipify.org to get more information. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Gather IP facts from ipify.org - name: Get my public IP community.general.ipify_facts: @@ -52,16 +51,15 @@ EXAMPLES = r''' community.general.ipify_facts: api_url: http://api.example.com/ipify timeout: 20 -''' +""" -RETURN = r''' ---- +RETURN = r""" ipify_public_ip: description: Public IP of the internet gateway. 
returned: success type: str sample: 1.2.3.4 -''' +""" import json diff --git a/plugins/modules/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py index f29b3cbf4c..45c86e0491 100644 --- a/plugins/modules/ipinfoio_facts.py +++ b/plugins/modules/ipinfoio_facts.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipinfoio_facts short_description: Retrieve IP geolocation facts of a host's IP address description: - - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" + - Gather IP geolocation facts of a host's IP address using ipinfo.io API. author: "Aleksei Kostiuk (@akostyuk)" extends_documentation_fragment: - community.general.attributes @@ -23,65 +22,65 @@ extends_documentation_fragment: options: timeout: description: - - HTTP connection timeout in seconds + - HTTP connection timeout in seconds. required: false default: 10 type: int http_agent: description: - - Set http user agent + - Set http user agent. required: false default: "ansible-ipinfoio-module/0.0.1" type: str notes: - - "Check http://ipinfo.io/ for more information" -''' + - Check http://ipinfo.io/ for more information. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Retrieve geolocation data of a host's IP address - name: Get IP geolocation data community.general.ipinfoio_facts: -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: - description: "Dictionary of ip geolocation facts for a host's IP address" + description: "Dictionary of ip geolocation facts for a host's IP address." returned: changed type: complex contains: ip: - description: "Public IP address of a host" + description: "Public IP address of a host." type: str sample: "8.8.8.8" hostname: - description: Domain name + description: Domain name. type: str sample: "google-public-dns-a.google.com" country: - description: ISO 3166-1 alpha-2 country code + description: ISO 3166-1 alpha-2 country code. 
type: str sample: "US" region: - description: State or province name + description: State or province name. type: str sample: "California" city: - description: City name + description: City name. type: str sample: "Mountain View" loc: - description: Latitude and Longitude of the location + description: Latitude and Longitude of the location. type: str sample: "37.3860,-122.0838" org: - description: "organization's name" + description: "Organization's name." type: str sample: "AS3356 Level 3 Communications, Inc." postal: - description: Postal code + description: Postal code. type: str sample: "94035" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py index 9f0016560e..e4fb5544d6 100644 --- a/plugins/modules/ipmi_boot.py +++ b/plugins/modules/ipmi_boot.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipmi_boot short_description: Management of order of boot devices description: - - Use this module to manage order of boot devices + - Use this module to manage order of boot devices. 
extends_documentation_fragment: - community.general.attributes attributes: @@ -52,14 +51,14 @@ options: bootdev: description: - Set boot device to use on next reboot - - "The choices for the device are: - - network -- Request network boot - - floppy -- Boot from floppy - - hd -- Boot from hard drive - - safe -- Boot from hard drive, requesting 'safe mode' - - optical -- boot from CD/DVD/BD drive - - setup -- Boot into setup utility - - default -- remove any IPMI directed boot device request" + - "The choices for the device are:" + - V(network) -- Request network boot + - V(floppy) -- Boot from floppy + - V(hd) -- Boot from hard drive + - "V(safe) -- Boot from hard drive, requesting 'safe mode'" + - V(optical) -- boot from CD/DVD/BD drive + - V(setup) -- Boot into setup utility + - V(default) -- remove any IPMI directed boot device request required: true choices: - network @@ -73,49 +72,46 @@ options: state: description: - Whether to ensure that boot devices is desired. - - "The choices for the state are: - - present -- Request system turn on - - absent -- Request system turn on" + - 'The choices for the state are: - present -- Request system turn on - absent -- Request system turn on.' default: present - choices: [ present, absent ] + choices: [present, absent] type: str persistent: description: - - If set, ask that system firmware uses this device beyond next boot. - Be aware many systems do not honor this. + - If set, ask that system firmware uses this device beyond next boot. Be aware many systems do not honor this. type: bool default: false uefiboot: description: - - If set, request UEFI boot explicitly. - Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. - In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. + - If set, request UEFI boot explicitly. 
Strictly speaking, the spec suggests that if not set, the system should BIOS + boot and offers no "do not care" option. In practice, this flag not being set does not preclude UEFI boot on any system + I have encountered. type: bool default: false requirements: - pyghmi author: "Bulat Gaifullin (@bgaifullin) " -''' +""" -RETURN = ''' +RETURN = r""" bootdev: - description: The boot device name which will be used beyond next boot. - returned: success - type: str - sample: default + description: The boot device name which will be used beyond next boot. + returned: success + type: str + sample: default persistent: - description: If True, system firmware will use this device beyond next boot. - returned: success - type: bool - sample: false + description: If True, system firmware will use this device beyond next boot. + returned: success + type: bool + sample: false uefimode: - description: If True, system firmware will use UEFI boot explicitly beyond next boot. - returned: success - type: bool - sample: false -''' + description: If True, system firmware will use UEFI boot explicitly beyond next boot. + returned: success + type: bool + sample: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure bootdevice is HD community.general.ipmi_boot: name: test.testdomain.com @@ -131,7 +127,7 @@ EXAMPLES = ''' key: 1234567890AABBCCDEFF000000EEEE12 bootdev: network state: absent -''' +""" import traceback import binascii diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py index 587cee06f3..3cb6d501ea 100644 --- a/plugins/modules/ipmi_power.py +++ b/plugins/modules/ipmi_power.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipmi_power short_description: Power management for machine description: - - Use this module for power management + - Use this module for power management. 
extends_documentation_fragment: - community.general.attributes attributes: @@ -52,12 +51,12 @@ options: state: description: - Whether to ensure that the machine in desired state. - - "The choices for state are: - - on -- Request system turn on - - off -- Request system turn off without waiting for OS to shutdown - - shutdown -- Have system request OS proper shutdown - - reset -- Request system reset without waiting for OS - - boot -- If system is off, then 'on', else 'reset'" + - "The choices for state are:" + - V(on) -- Request system turn on + - V(off) -- Request system turn off without waiting for OS to shutdown + - V(shutdown) -- Have system request OS proper shutdown + - V(reset) -- Request system reset without waiting for OS + - "V(boot) -- If system is off, then V(on), else V(reset)" - Either this option or O(machine) is required. choices: ['on', 'off', shutdown, reset, boot] type: str @@ -68,8 +67,7 @@ options: type: int machine: description: - - Provide a list of the remote target address for the bridge IPMI request, - and the power status. + - Provide a list of the remote target address for the bridge IPMI request, and the power status. - Either this option or O(state) is required. required: false type: list @@ -92,40 +90,31 @@ options: requirements: - pyghmi author: "Bulat Gaifullin (@bgaifullin) " -''' +""" -RETURN = ''' +RETURN = r""" powerstate: - description: The current power state of the machine. - returned: success and O(machine) is not provided - type: str - sample: 'on' + description: The current power state of the machine. + returned: success and O(machine) is not provided + type: str + sample: 'on' status: - description: The current power state of the machine when the machine option is set. - returned: success and O(machine) is provided - type: list - elements: dict - version_added: 4.3.0 - contains: - powerstate: - description: The current power state of the machine specified by RV(status[].targetAddress). 
- type: str - targetAddress: - description: The remote target address. - type: int - sample: [ - { - "powerstate": "on", - "targetAddress": 48, - }, - { - "powerstate": "on", - "targetAddress": 50, - }, - ] -''' + description: The current power state of the machine when the machine option is set. + returned: success and O(machine) is provided + type: list + elements: dict + version_added: 4.3.0 + contains: + powerstate: + description: The current power state of the machine specified by RV(status[].targetAddress). + type: str + targetAddress: + description: The remote target address. + type: int + sample: [{"powerstate": "on", "targetAddress": 48}, {"powerstate": "on", "targetAddress": 50}] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure machine is powered on community.general.ipmi_power: name: test.testdomain.com @@ -153,7 +142,7 @@ EXAMPLES = ''' state: 'on' - targetAddress: 50 state: 'off' -''' +""" import traceback import binascii diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py index c97b5694c9..6f3fa19042 100644 --- a/plugins/modules/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: iptables_state short_description: Save iptables state into a file or restore it from a file version_added: '1.1.0' @@ -19,26 +18,17 @@ extends_documentation_fragment: - community.general.attributes - community.general.attributes.flow description: - - C(iptables) is used to set up, maintain, and inspect the tables of IP - packet filter rules in the Linux kernel. - - This module handles the saving and/or loading of rules. This is the same - as the behaviour of the C(iptables-save) and C(iptables-restore) (or - C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this - module uses internally. 
- - Modifying the state of the firewall remotely may lead to loose access to - the host in case of mistake in new ruleset. This module embeds a rollback - feature to avoid this, by telling the host to restore previous rules if a - cookie is still there after a given delay, and all this time telling the - controller to try to remove this cookie on the host through a new - connection. + - C(iptables) is used to set up, maintain, and inspect the tables of IP packet filter rules in the Linux kernel. + - This module handles the saving and/or loading of rules. This is the same as the behaviour of the C(iptables-save) and + C(iptables-restore) (or C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this module uses internally. + - Modifying the state of the firewall remotely may lead to loose access to the host in case of mistake in new ruleset. This + module embeds a rollback feature to avoid this, by telling the host to restore previous rules if a cookie is still there + after a given delay, and all this time telling the controller to try to remove this cookie on the host through a new connection. notes: - - The rollback feature is not a module option and depends on task's - attributes. To enable it, the module must be played asynchronously, i.e. - by setting task attributes C(poll) to V(0), and C(async) to a value less - or equal to C(ANSIBLE_TIMEOUT). If C(async) is greater, the rollback will - still happen if it shall happen, but you will experience a connection - timeout instead of more relevant info returned by the module after its - failure. + - The rollback feature is not a module option and depends on task's attributes. To enable it, the module must be played + asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less or equal to C(ANSIBLE_TIMEOUT). 
+ If C(async) is greater, the rollback will still happen if it shall happen, but you will experience a connection timeout + instead of more relevant info returned by the module after its failure. attributes: check_mode: support: full @@ -59,22 +49,18 @@ options: description: - Which version of the IP protocol this module should apply to. type: str - choices: [ ipv4, ipv6 ] + choices: [ipv4, ipv6] default: ipv4 modprobe: description: - - Specify the path to the C(modprobe) program internally used by iptables - related commands to load kernel modules. - - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the - executable's path. + - Specify the path to the C(modprobe) program internally used by iptables related commands to load kernel modules. + - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the executable's path. type: path noflush: description: - For O(state=restored), ignored otherwise. - - If V(false), restoring iptables rules from a file flushes (deletes) - all previous contents of the respective table(s). If V(true), the - previous rules are left untouched (but policies are updated anyway, - for all built-in chains). + - If V(false), restoring iptables rules from a file flushes (deletes) all previous contents of the respective table(s). + If V(true), the previous rules are left untouched (but policies are updated anyway, for all built-in chains). type: bool default: false path: @@ -85,29 +71,26 @@ options: required: true state: description: - - Whether the firewall state should be saved (into a file) or restored - (from a file). + - Whether the firewall state should be saved (into a file) or restored (from a file). type: str - choices: [ saved, restored ] + choices: [saved, restored] required: true table: description: - - When O(state=restored), restore only the named table even if the input - file contains other tables. Fail if the named table is not declared in - the file. 
- - When O(state=saved), restrict output to the specified table. If not - specified, output includes all active tables. + - When O(state=restored), restore only the named table even if the input file contains other tables. Fail if the named + table is not declared in the file. + - When O(state=saved), restrict output to the specified table. If not specified, output includes all active tables. type: str - choices: [ filter, nat, mangle, raw, security ] + choices: [filter, nat, mangle, raw, security] wait: description: - - Wait N seconds for the xtables lock to prevent instant failure in case - multiple instances of the program are running concurrently. + - Wait N seconds for the xtables lock to prevent instant failure in case multiple instances of the program are running + concurrently. type: int requirements: [iptables, ip6tables] -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # This will apply to all loaded/active IPv4 tables. - name: Save current state of the firewall in system file community.general.iptables_state: @@ -151,9 +134,9 @@ EXAMPLES = r''' - name: show current state of the firewall ansible.builtin.debug: var: iptables_state.initial_state -''' +""" -RETURN = r''' +RETURN = r""" applied: description: Whether or not the wanted state has been successfully restored. type: bool @@ -235,7 +218,7 @@ tables: ] } returned: always -''' +""" import re diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py index 3ffad79fb6..118f59e8d9 100644 --- a/plugins/modules/ipwcli_dns.py +++ b/plugins/modules/ipwcli_dns.py @@ -8,127 +8,124 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipwcli_dns -short_description: Manage DNS Records for Ericsson IPWorks via ipwcli +short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli) version_added: '0.2.0' description: - - "Manage DNS records for the Ericsson IPWorks DNS server. 
The module will use the ipwcli to deploy the DNS records." - + - Manage DNS records for the Ericsson IPWorks DNS server. The module will use the C(ipwcli) to deploy the DNS records. requirements: - - ipwcli (installed on Ericsson IPWorks) + - ipwcli (installed on Ericsson IPWorks) notes: - - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. - + - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - dnsname: - description: - - Name of the record. - required: true - type: str - type: - description: - - Type of the record. - required: true - type: str - choices: [ NAPTR, SRV, A, AAAA ] - container: - description: - - Sets the container zone for the record. - required: true - type: str - address: - description: - - The IP address for the A or AAAA record. - - Required for O(type=A) or O(type=AAAA). - type: str - ttl: - description: - - Sets the TTL of the record. - type: int - default: 3600 - state: - description: - - Whether the record should exist or not. - type: str - choices: [ absent, present ] - default: present - priority: - description: - - Sets the priority of the SRV record. - type: int - default: 10 - weight: - description: - - Sets the weight of the SRV record. - type: int - default: 10 - port: - description: - - Sets the port of the SRV record. - - Required for O(type=SRV). - type: int - target: - description: - - Sets the target of the SRV record. - - Required for O(type=SRV). - type: str - order: - description: - - Sets the order of the NAPTR record. - - Required for O(type=NAPTR). - type: int - preference: - description: - - Sets the preference of the NAPTR record. - - Required for O(type=NAPTR). 
- type: int - flags: - description: - - Sets one of the possible flags of NAPTR record. - - Required for O(type=NAPTR). - type: str - choices: ['S', 'A', 'U', 'P'] - service: - description: - - Sets the service of the NAPTR record. - - Required for O(type=NAPTR). - type: str - replacement: - description: - - Sets the replacement of the NAPTR record. - - Required for O(type=NAPTR). - type: str - username: - description: - - Username to login on ipwcli. - type: str - required: true - password: - description: - - Password to login on ipwcli. - type: str - required: true + dnsname: + description: + - Name of the record. + required: true + type: str + type: + description: + - Type of the record. + required: true + type: str + choices: [NAPTR, SRV, A, AAAA] + container: + description: + - Sets the container zone for the record. + required: true + type: str + address: + description: + - The IP address for the A or AAAA record. + - Required for O(type=A) or O(type=AAAA). + type: str + ttl: + description: + - Sets the TTL of the record. + type: int + default: 3600 + state: + description: + - Whether the record should exist or not. + type: str + choices: [absent, present] + default: present + priority: + description: + - Sets the priority of the SRV record. + type: int + default: 10 + weight: + description: + - Sets the weight of the SRV record. + type: int + default: 10 + port: + description: + - Sets the port of the SRV record. + - Required for O(type=SRV). + type: int + target: + description: + - Sets the target of the SRV record. + - Required for O(type=SRV). + type: str + order: + description: + - Sets the order of the NAPTR record. + - Required for O(type=NAPTR). + type: int + preference: + description: + - Sets the preference of the NAPTR record. + - Required for O(type=NAPTR). + type: int + flags: + description: + - Sets one of the possible flags of NAPTR record. + - Required for O(type=NAPTR). 
+ type: str + choices: ['S', 'A', 'U', 'P'] + service: + description: + - Sets the service of the NAPTR record. + - Required for O(type=NAPTR). + type: str + replacement: + description: + - Sets the replacement of the NAPTR record. + - Required for O(type=NAPTR). + type: str + username: + description: + - Username to login on ipwcli. + type: str + required: true + password: + description: + - Password to login on ipwcli. + type: str + required: true author: - - Christian Wollinger (@cwollinger) -''' + - Christian Wollinger (@cwollinger) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create A record community.general.ipwcli_dns: dnsname: example.com @@ -157,14 +154,14 @@ EXAMPLES = ''' service: 'SIP+D2T' replacement: '_sip._tcp.test.example.com.' flags: S -''' +""" -RETURN = ''' +RETURN = r""" record: - description: The created record from the input params - type: str - returned: always -''' + description: The created record from the input params. + type: str + returned: always +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py index 748479e87b..cbeb3fafa0 100644 --- a/plugins/modules/irc.py +++ b/plugins/modules/irc.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: irc short_description: Send a message to an IRC channel or a nick description: @@ -26,12 +25,12 @@ options: server: type: str description: - - IRC server name/address + - IRC server name/address. default: localhost port: type: int description: - - IRC server port number + - IRC server port number. default: 6667 nick: type: str @@ -46,45 +45,44 @@ options: topic: type: str description: - - Set the channel topic + - Set the channel topic. color: type: str description: - Text color for the message. 
default: "none" - choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", "light_gray"] + choices: ["none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", + "light_cyan", "light_blue", "pink", "gray", "light_gray"] aliases: [colour] channel: type: str description: - - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. + - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. nick_to: type: list elements: str description: - - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. + - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the + message will be sent to both of them. key: type: str description: - - Channel key + - Channel key. passwd: type: str description: - - Server password + - Server password. timeout: type: int description: - - Timeout to use while waiting for successful registration and join - messages, this is to prevent an endless loop + - Timeout to use while waiting for successful registration and join messages, this is to prevent an endless loop. default: 30 use_tls: description: - - Designates whether TLS/SSL should be used when connecting to the IRC server - - O(use_tls) is available since community.general 8.1.0, before the option - was exlusively called O(use_ssl). The latter is now an alias of O(use_tls). - - B(Note:) for security reasons, you should always set O(use_tls=true) and - O(validate_certs=true) whenever possible. + - Designates whether TLS/SSL should be used when connecting to the IRC server. 
+ - O(use_tls) is available since community.general 8.1.0, before the option was exlusively called O(use_ssl). The latter + is now an alias of O(use_tls). + - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. - The default of this option changed to V(true) in community.general 10.0.0. type: bool default: true @@ -92,36 +90,35 @@ options: - use_ssl part: description: - - Designates whether user should part from channel after sending message or not. - Useful for when using a faux bot and not wanting join/parts between messages. + - Designates whether user should part from channel after sending message or not. Useful for when using a mock bot and + not wanting join/parts between messages. type: bool default: true style: type: str description: - - Text style for the message. Note italic does not work on some clients - choices: [ "bold", "underline", "reverse", "italic", "none" ] + - Text style for the message. Note italic does not work on some clients. + choices: ["bold", "underline", "reverse", "italic", "none"] default: none validate_certs: description: - If set to V(false), the SSL certificates will not be validated. - - This should always be set to V(true). Using V(false) is unsafe and should only be done - if the network between between Ansible and the IRC server is known to be safe. - - B(Note:) for security reasons, you should always set O(use_tls=true) and - O(validate_certs=true) whenever possible. + - This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between between + Ansible and the IRC server is known to be safe. + - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. - The default of this option changed to V(true) in community.general 10.0.0. 
type: bool default: true version_added: 8.1.0 # informational: requirements for nodes -requirements: [ socket ] +requirements: [socket] author: - - "Jan-Piet Mens (@jpmens)" - - "Matt Martz (@sivel)" -''' + - "Jan-Piet Mens (@jpmens)" + - "Matt Martz (@sivel)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to an IRC channel from nick ansible community.general.irc: server: irc.example.net @@ -156,7 +153,7 @@ EXAMPLES = ''' msg: 'All finished at {{ ansible_date_time.iso8601 }}' color: red nick: ansibleIRC -''' +""" # =========================================== # IRC module support methods. diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py index c39c710d53..008cb271bb 100644 --- a/plugins/modules/iso_create.py +++ b/plugins/modules/iso_create.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: iso_create short_description: Generate ISO file with specified files or folders description: @@ -31,60 +30,60 @@ attributes: support: none options: - src_files: - description: - - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. - - Will fail if specified file or folder in O(src_files) does not exist on local machine. - - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and - underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path - names are limited to 255 characters.' - type: list - required: true - elements: path - dest_iso: - description: - - The absolute path with file name of the new generated ISO file on local machine. - - Will create intermediate folders when they does not exist. - type: path - required: true - interchange_level: - description: - - The ISO9660 interchange level to use, it dictates the rules on the names of files. 
- - Levels and valid values V(1), V(2), V(3), V(4) are supported. - - The default value is level V(1), which is the most conservative, level V(3) is recommended. - - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension. - type: int - default: 1 - choices: [1, 2, 3, 4] - vol_ident: - description: - - The volume identification string to use on the new generated ISO image. - type: str - rock_ridge: - description: - - Whether to make this ISO have the Rock Ridge extensions or not. - - Valid values are V(1.09), V(1.10) or V(1.12), means adding the specified Rock Ridge version to the ISO. - - If unsure, set V(1.09) to ensure maximum compatibility. - - If not specified, then not add Rock Ridge extension to the ISO. - type: str - choices: ['1.09', '1.10', '1.12'] - joliet: - description: - - Support levels and valid values are V(1), V(2), or V(3). - - Level V(3) is by far the most common. - - If not specified, then no Joliet support is added. - type: int - choices: [1, 2, 3] - udf: - description: - - Whether to add UDF support to this ISO. - - If set to V(true), then version 2.60 of the UDF spec is used. - - If not specified or set to V(false), then no UDF support is added. - type: bool - default: false -''' + src_files: + description: + - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. + - Will fail if specified file or folder in O(src_files) does not exist on local machine. + - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores + (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited + to 255 characters.' + type: list + required: true + elements: path + dest_iso: + description: + - The absolute path with file name of the new generated ISO file on local machine. 
+ - Will create intermediate folders when they does not exist. + type: path + required: true + interchange_level: + description: + - The ISO9660 interchange level to use, it dictates the rules on the names of files. + - Levels and valid values V(1), V(2), V(3), V(4) are supported. + - The default value is level V(1), which is the most conservative, level V(3) is recommended. + - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension. + type: int + default: 1 + choices: [1, 2, 3, 4] + vol_ident: + description: + - The volume identification string to use on the new generated ISO image. + type: str + rock_ridge: + description: + - Whether to make this ISO have the Rock Ridge extensions or not. + - Valid values are V(1.09), V(1.10) or V(1.12), means adding the specified Rock Ridge version to the ISO. + - If unsure, set V(1.09) to ensure maximum compatibility. + - If not specified, then not add Rock Ridge extension to the ISO. + type: str + choices: ['1.09', '1.10', '1.12'] + joliet: + description: + - Support levels and valid values are V(1), V(2), or V(3). + - Level V(3) is by far the most common. + - If not specified, then no Joliet support is added. + type: int + choices: [1, 2, 3] + udf: + description: + - Whether to add UDF support to this ISO. + - If set to V(true), then version 2.60 of the UDF spec is used. + - If not specified or set to V(false), then no UDF support is added. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create an ISO file community.general.iso_create: src_files: @@ -109,46 +108,46 @@ EXAMPLES = r''' interchange_level: 3 joliet: 3 vol_ident: WIN_AUTOINSTALL -''' +""" -RETURN = r''' +RETURN = r""" source_file: - description: Configured source files or directories list. - returned: on success - type: list - elements: path - sample: ["/path/to/file.txt", "/path/to/folder"] + description: Configured source files or directories list. 
+ returned: on success + type: list + elements: path + sample: ["/path/to/file.txt", "/path/to/folder"] created_iso: - description: Created iso file path. - returned: on success - type: str - sample: "/path/to/test.iso" + description: Created iso file path. + returned: on success + type: str + sample: "/path/to/test.iso" interchange_level: - description: Configured interchange level. - returned: on success - type: int - sample: 3 + description: Configured interchange level. + returned: on success + type: int + sample: 3 vol_ident: - description: Configured volume identification string. - returned: on success - type: str - sample: "OEMDRV" + description: Configured volume identification string. + returned: on success + type: str + sample: "OEMDRV" joliet: - description: Configured Joliet support level. - returned: on success - type: int - sample: 3 + description: Configured Joliet support level. + returned: on success + type: int + sample: 3 rock_ridge: - description: Configured Rock Ridge version. - returned: on success - type: str - sample: "1.09" + description: Configured Rock Ridge version. + returned: on success + type: str + sample: "1.09" udf: - description: Configured UDF support. - returned: on success - type: bool - sample: false -''' + description: Configured UDF support. + returned: on success + type: bool + sample: false +""" import os import traceback diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py index 543faaa5ef..feac8417b8 100644 --- a/plugins/modules/iso_customize.py +++ b/plugins/modules/iso_customize.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: iso_customize short_description: Add/remove/change files in ISO file description: @@ -34,25 +33,25 @@ attributes: options: src_iso: description: - - This is the path of source ISO file. + - This is the path of source ISO file. 
type: path required: true dest_iso: description: - - The path of the customized ISO file. + - The path of the customized ISO file. type: path required: true delete_files: description: - - Absolute paths for files inside the ISO file that should be removed. + - Absolute paths for files inside the ISO file that should be removed. type: list required: false elements: str default: [] add_files: description: - - Allows to add and replace files in the ISO file. - - Will create intermediate folders inside the ISO file when they do not exist. + - Allows to add and replace files in the ISO file. + - Will create intermediate folders inside the ISO file when they do not exist. type: list required: false elements: dict @@ -60,23 +59,22 @@ options: suboptions: src_file: description: - - The path with file name on the machine the module is executed on. + - The path with file name on the machine the module is executed on. type: path required: true dest_file: description: - - The absolute path of the file inside the ISO file. + - The absolute path of the file inside the ISO file. type: str required: true notes: -- The C(pycdlib) library states it supports Python 2.7 and 3.4+. -- > - The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet / UDF. - But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. - So we take workaround "delete the existing file and then add file for ISO with Rock Ridge". -''' + - The C(pycdlib) library states it supports Python 2.7 and 3.4+. + - The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet + / UDF. But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround "delete the + existing file and then add file for ISO with Rock Ridge". 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: "Customize ISO file" community.general.iso_customize: src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso" @@ -89,9 +87,9 @@ EXAMPLES = r''' - src_file: "/path/to/ubuntu.seed" dest_file: "/preseed/ubuntu.seed" register: customize_iso_result -''' +""" -RETURN = r''' +RETURN = r""" src_iso: description: Path of source ISO file. returned: on success @@ -102,7 +100,7 @@ dest_iso: returned: on success type: str sample: "/path/to/customized.iso" -''' +""" import os diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py index 9ef046ede4..8cda967b64 100644 --- a/plugins/modules/iso_extract.py +++ b/plugins/modules/iso_extract.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - Jeroen Hoekx (@jhoekx) - Matt Robinson (@ribbons) @@ -21,12 +20,10 @@ module: iso_extract short_description: Extract files from an ISO image description: - This module has two possible ways of operation. - - If 7zip is installed on the system, this module extracts files from an ISO - into a temporary directory and copies files to a given destination, - if needed. - - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module - mounts the ISO image to a temporary location, and copies files to a given - destination, if needed. + - If 7zip is installed on the system, this module extracts files from an ISO into a temporary directory and copies files + to a given destination, if needed. + - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module mounts the ISO image to a temporary location, + and copies files to a given destination, if needed. requirements: - Either 7z (from C(7zip) or C(p7zip) package) - Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) @@ -40,60 +37,59 @@ attributes: options: image: description: - - The ISO image to extract files from. 
+ - The ISO image to extract files from. type: path required: true - aliases: [ path, src ] + aliases: [path, src] dest: description: - - The destination directory to extract files to. + - The destination directory to extract files to. type: path required: true files: description: - - A list of files to extract from the image. - - Extracting directories does not work. + - A list of files to extract from the image. + - Extracting directories does not work. type: list elements: str required: true force: description: - - If V(true), which will replace the remote file when contents are different than the source. - - If V(false), the file will only be extracted and copied if the destination does not already exist. + - If V(true), which will replace the remote file when contents are different than the source. + - If V(false), the file will only be extracted and copied if the destination does not already exist. type: bool default: true executable: description: - - The path to the C(7z) executable to use for extracting files from the ISO. - - If not provided, it will assume the value V(7z). + - The path to the C(7z) executable to use for extracting files from the ISO. + - If not provided, it will assume the value V(7z). type: path password: description: - - Password used to decrypt files from the ISO. - - Will only be used if 7z is used. - - The password is used as a command line argument to 7z. This is a B(potential security risk) that - allows passwords to be revealed if someone else can list running processes on the same machine - in the right moment. + - Password used to decrypt files from the ISO. + - Will only be used if 7z is used. + - The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords + to be revealed if someone else can list running processes on the same machine in the right moment. 
type: str version_added: 10.1.0 notes: -- Only the file checksum (content) is taken into account when extracting files - from the ISO image. If O(force=false), only checks the presence of the file. -''' + - Only the file checksum (content) is taken into account when extracting files from the ISO image. If O(force=false), only + checks the presence of the file. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Extract kernel and ramdisk from a LiveCD community.general.iso_extract: image: /tmp/rear-test.iso dest: /tmp/virt-rear/ files: - - isolinux/kernel - - isolinux/initrd.cgz -''' + - isolinux/kernel + - isolinux/initrd.cgz +""" -RETURN = r''' +RETURN = r""" # -''' +""" import os.path import shutil diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py index 650b29957d..01a34ff9f5 100644 --- a/plugins/modules/jabber.py +++ b/plugins/modules/jabber.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jabber short_description: Send a message to jabber user or chat room description: - - Send a message to jabber + - Send a message to jabber. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,17 +25,17 @@ options: user: type: str description: - - User as which to connect + - User as which to connect. required: true password: type: str description: - - password for user to connect + - Password for user to connect. required: true to: type: str description: - - user ID or name of the room, when using room use a slash to indicate your nick. + - User ID or name of the room, when using room use a slash to indicate your nick. required: true msg: type: str @@ -46,24 +45,22 @@ options: host: type: str description: - - host to connect, overrides user info + - Host to connect, overrides user info. port: type: int description: - - port to connect to, overrides default + - Port to connect to, overrides default. 
default: 5222 encoding: type: str description: - - message encoding - -# informational: requirements for nodes + - Message encoding. requirements: - - python xmpp (xmpppy) + - python xmpp (xmpppy) author: "Brian Coca (@bcoca)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to a user community.general.jabber: user: mybot@example.net @@ -86,7 +83,7 @@ EXAMPLES = ''' password: secret to: mychaps@example.net msg: Ansible task finished -''' +""" import time import traceback diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py index e2d04b71e2..a205ecc789 100644 --- a/plugins/modules/java_cert.py +++ b/plugins/modules/java_cert.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: java_cert short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) description: - - This is a wrapper module around keytool, which can be used to import certificates - and optionally private keys to a given java keystore, or remove them from it. + - This is a wrapper module around keytool, which can be used to import certificates and optionally private keys to a given + java keystore, or remove them from it. extends_documentation_fragment: - community.general.attributes - ansible.builtin.files @@ -61,9 +60,8 @@ options: pkcs12_path: description: - Local path to load PKCS12 keystore from. - - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching - the certificate, and is used to import both the certificate and its private key into the - java keystore. + - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching the certificate, + and is used to import both the certificate and its private key into the java keystore. 
- Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: path pkcs12_password: @@ -100,10 +98,10 @@ options: state: description: - Defines action which can be either certificate import or removal. - - When state is present, the certificate will always idempotently be inserted - into the keystore, even if there already exists a cert alias that is different. + - When state is present, the certificate will always idempotently be inserted into the keystore, even if there already + exists a cert alias that is different. type: str - choices: [ absent, present ] + choices: [absent, present] default: present mode: version_added: 8.5.0 @@ -125,10 +123,10 @@ options: version_added: 8.5.0 requirements: [openssl, keytool] author: -- Adam Hamsik (@haad) -''' + - Adam Hamsik (@haad) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Import SSL certificate from google.com to a given cacerts keystore community.general.java_cert: cert_url: google.com @@ -196,9 +194,9 @@ EXAMPLES = r''' keystore_pass: changeit keystore_create: true state: present -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Output from stdout of keytool command after execution of given command. returned: success @@ -216,7 +214,7 @@ cmd: returned: success type: str sample: "keytool -importcert -noprompt -keystore" -''' +""" import os import tempfile diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index 0a8e3398d5..df7e71abbe 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -10,8 +10,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: java_keystore short_description: Create a Java keystore in JKS format description: @@ -25,25 +24,22 @@ options: name: description: - Name of the certificate in the keystore. 
- - If the provided name does not exist in the keystore, the module - will re-create the keystore. This behavior changed in community.general 3.0.0, - before that the module would fail when the name did not match. + - If the provided name does not exist in the keystore, the module will re-create the keystore. This behavior changed + in community.general 3.0.0, before that the module would fail when the name did not match. type: str required: true certificate: description: - Content of the certificate used to create the keystore. - - If the fingerprint of the provided certificate does not match the - fingerprint of the certificate bundled in the keystore, the keystore - is regenerated with the provided certificate. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. - Exactly one of O(certificate) or O(certificate_path) is required. type: str certificate_path: description: - Location of the certificate used to create the keystore. - - If the fingerprint of the provided certificate does not match the - fingerprint of the certificate bundled in the keystore, the keystore - is regenerated with the provided certificate. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. - Exactly one of O(certificate) or O(certificate_path) is required. type: path version_added: '3.0.0' @@ -66,10 +62,8 @@ options: password: description: - Password that should be used to secure the keystore. - - If the provided password fails to unlock the keystore, the module - will re-create the keystore with the new passphrase. This behavior - changed in community.general 3.0.0, before that the module would fail - when the password did not match. 
+ - If the provided password fails to unlock the keystore, the module will re-create the keystore with the new passphrase. + This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match. type: str required: true dest: @@ -106,16 +100,13 @@ options: keystore_type: description: - Type of the Java keystore. - - When this option is omitted and the keystore doesn't already exist, the - behavior follows C(keytool)'s default store type which depends on - Java version; V(pkcs12) since Java 9 and V(jks) prior (may also - be V(pkcs12) if new default has been backported to this version). - - When this option is omitted and the keystore already exists, the current - type is left untouched, unless another option leads to overwrite the - keystore (in that case, this option behaves like for keystore creation). - - When O(keystore_type) is set, the keystore is created with this type if - it does not already exist, or is overwritten to match the given type in - case of mismatch. + - When this option is omitted and the keystore does not already exist, the behavior follows C(keytool)'s default store + type which depends on Java version; V(pkcs12) since Java 9 and V(jks) prior (may also be V(pkcs12) if new default + has been backported to this version). + - When this option is omitted and the keystore already exists, the current type is left untouched, unless another option + leads to overwrite the keystore (in that case, this option behaves like for keystore creation). + - When O(keystore_type) is set, the keystore is created with this type if it does not already exist, or is overwritten + to match the given type in case of mismatch. 
type: str choices: - jks @@ -135,16 +126,14 @@ seealso: - module: community.crypto.openssl_pkcs12 - module: community.general.java_cert notes: - - O(certificate) and O(private_key) require that their contents are available - on the controller (either inline in a playbook, or with the P(ansible.builtin.file#lookup) lookup), - while O(certificate_path) and O(private_key_path) require that the files are - available on the target host. - - By design, any change of a value of options O(keystore_type), O(name) or - O(password), as well as changes of key or certificate materials will cause - the existing O(dest) to be overwritten. -''' + - O(certificate) and O(private_key) require that their contents are available on the controller (either inline in a playbook, + or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the + files are available on the target host. + - By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate + materials will cause the existing O(dest) to be overwritten. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a keystore for the given certificate/private key pair (inline) community.general.java_keystore: name: example @@ -174,9 +163,9 @@ EXAMPLES = ''' private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key password: changeit dest: /etc/security/keystore.jks -''' +""" -RETURN = ''' +RETURN = r""" msg: description: Output from stdout of keytool/openssl command after execution of given command or an error. returned: changed and failure @@ -190,17 +179,17 @@ err: sample: "Keystore password is too short - must be at least 6 characters\n" rc: - description: keytool/openssl command execution return value + description: Keytool/openssl command execution return value. returned: changed and failure type: int sample: "0" cmd: - description: Executed command to get action done + description: Executed command to get action done. 
returned: changed and failure type: str sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256" -''' +""" import os diff --git a/plugins/modules/jboss.py b/plugins/modules/jboss.py index 3d07a38d63..2d4f4b9bad 100644 --- a/plugins/modules/jboss.py +++ b/plugins/modules/jboss.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: jboss short_description: Deploy applications to JBoss description: @@ -39,23 +39,23 @@ options: - The location in the filesystem where the deployment scanner listens. type: path state: - choices: [ present, absent ] + choices: [present, absent] default: "present" description: - Whether the application should be deployed or undeployed. type: str notes: - - The JBoss standalone deployment-scanner has to be enabled in standalone.xml - - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner. - Duration of waiting time depends on scan-interval parameter from standalone.xml. - - Ensure no identically named application is deployed through the JBoss CLI + - The JBoss standalone deployment-scanner has to be enabled in C(standalone.xml). + - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner. Duration of waiting time depends + on scan-interval parameter from C(standalone.xml). + - Ensure no identically named application is deployed through the JBoss CLI. seealso: -- name: WildFly reference - description: Complete reference of the WildFly documentation. - link: https://docs.wildfly.org + - name: WildFly reference + description: Complete reference of the WildFly documentation. 
+ link: https://docs.wildfly.org author: - Jeroen Hoekx (@jhoekx) -''' +""" EXAMPLES = r""" - name: Deploy a hello world application to the default deploy_path diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py index 6d830849e7..a909eab690 100644 --- a/plugins/modules/jenkins_build.py +++ b/plugins/modules/jenkins_build.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_build short_description: Manage jenkins builds version_added: 2.2.0 description: - - Manage Jenkins builds with Jenkins REST API. + - Manage Jenkins builds with Jenkins REST API. requirements: - "python-jenkins >= 0.4.12" author: @@ -64,7 +63,7 @@ options: type: str user: description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. type: str detach: description: @@ -79,9 +78,9 @@ options: default: 10 type: int version_added: 7.4.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a jenkins build using basic authentication community.general.jenkins_build: name: "test-check" @@ -108,10 +107,9 @@ EXAMPLES = ''' user: Jenkins token: abcdefghijklmnopqrstuvwxyz123456 url: http://localhost:8080 -''' +""" -RETURN = ''' ---- +RETURN = r""" name: description: Name of the jenkins job. returned: success @@ -136,7 +134,7 @@ build_info: description: Build info of the jenkins job. 
returned: success type: dict -''' +""" import traceback from time import sleep diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py index eae6eb9374..f252eb504a 100644 --- a/plugins/modules/jenkins_build_info.py +++ b/plugins/modules/jenkins_build_info.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_build_info short_description: Get information about Jenkins builds version_added: 7.4.0 description: - - Get information about Jenkins builds with Jenkins REST API. + - Get information about Jenkins builds with Jenkins REST API. requirements: - "python-jenkins >= 0.4.12" author: @@ -48,11 +47,11 @@ options: type: str user: description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get information about a jenkins build using basic authentication community.general.jenkins_build_info: name: "test-check" @@ -74,10 +73,9 @@ EXAMPLES = ''' user: Jenkins token: abcdefghijklmnopqrstuvwxyz123456 url: http://localhost:8080 -''' +""" -RETURN = ''' ---- +RETURN = r""" name: description: Name of the jenkins job. returned: success @@ -102,7 +100,7 @@ build_info: description: Build info of the jenkins job. returned: success type: dict -''' +""" import traceback diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py index e8301041f2..f539e569e8 100644 --- a/plugins/modules/jenkins_job.py +++ b/plugins/modules/jenkins_job.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_job short_description: Manage jenkins jobs description: - - Manage Jenkins jobs by using Jenkins REST API. + - Manage Jenkins jobs by using Jenkins REST API. 
requirements: - "python-jenkins >= 0.4.12" author: "Sergio Millan Rodriguez (@sermilrod)" @@ -28,7 +27,7 @@ options: config: type: str description: - - config in XML format. + - Config in XML format. - Required if job does not yet exist. - Mutually exclusive with O(enabled). - Considered if O(state=present). @@ -71,20 +70,19 @@ options: user: type: str description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. required: false validate_certs: type: bool default: true description: - - If set to V(false), the SSL certificates will not be validated. - This should only set to V(false) used on personally controlled sites - using self-signed certificates as it avoids verifying the source site. + - If set to V(false), the SSL certificates will not be validated. This should only set to V(false) used on personally + controlled sites using self-signed certificates as it avoids verifying the source site. - The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY). version_added: 2.3.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a jenkins job using basic authentication community.general.jenkins_job: config: "{{ lookup('file', 'templates/test.xml') }}" @@ -132,10 +130,9 @@ EXAMPLES = ''' enabled: false url: http://localhost:8080 user: admin -''' +""" -RETURN = ''' ---- +RETURN = r""" name: description: Name of the jenkins job. 
returned: success @@ -161,7 +158,7 @@ url: returned: success type: str sample: https://jenkins.mydomain.com -''' +""" import os import traceback diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py index 40e1d7aea3..40aa416968 100644 --- a/plugins/modules/jenkins_job_info.py +++ b/plugins/modules/jenkins_job_info.py @@ -9,8 +9,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_job_info short_description: Get information about Jenkins jobs description: @@ -51,18 +50,18 @@ options: user: type: str description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. validate_certs: description: - - If set to V(false), the SSL certificates will not be validated. - - This should only set to V(false) used on personally controlled sites using self-signed certificates. + - If set to V(false), the SSL certificates will not be validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. default: true type: bool author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Get all Jenkins jobs anonymously - community.general.jenkins_job_info: user: admin @@ -122,24 +121,23 @@ EXAMPLES = ''' token: 126df5c60d66c66e3b75b11104a16a8a url: https://jenkins.example.com register: my_jenkins_job_info -''' +""" -RETURN = ''' ---- +RETURN = r""" jobs: - description: All jobs found matching the specified criteria + description: All jobs found matching the specified criteria. 
returned: success type: list sample: [ - { - "name": "test-job", - "fullname": "test-folder/test-job", - "url": "http://localhost:8080/job/test-job/", - "color": "blue" - }, + { + "name": "test-job", + "fullname": "test-folder/test-job", + "url": "http://localhost:8080/job/test-job/", + "color": "blue" + }, ] -''' +""" import ssl import fnmatch diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py index 9406eab4c5..affd462659 100644 --- a/plugins/modules/jenkins_node.py +++ b/plugins/modules/jenkins_node.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_node short_description: Manage Jenkins nodes version_added: 10.0.0 @@ -25,8 +24,7 @@ attributes: check_mode: support: partial details: - - Check mode is unable to show configuration changes for a node that is not yet - present. + - Check mode is unable to show configuration changes for a node that is not yet present. diff_mode: support: none options: @@ -50,8 +48,8 @@ options: type: str state: description: - - Specifies whether the Jenkins node should be V(present) (created), V(absent) - (deleted), V(enabled) (online) or V(disabled) (offline). + - Specifies whether the Jenkins node should be V(present) (created), V(absent) (deleted), V(enabled) (online) or V(disabled) + (offline). default: present choices: ['enabled', 'disabled', 'present', 'absent'] type: str @@ -66,18 +64,15 @@ options: elements: str offline_message: description: - - Specifies the offline reason message to be set when configuring the Jenkins node - state. - - If O(offline_message) is given and requested O(state) is not V(disabled), an - error will be raised. - - Internally O(offline_message) is set via the V(toggleOffline) API, so updating - the message when the node is already offline (current state V(disabled)) is not - possible. In this case, a warning will be issued. 
+ - Specifies the offline reason message to be set when configuring the Jenkins node state. + - If O(offline_message) is given and requested O(state) is not V(disabled), an error will be raised. + - Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already + offline (current state V(disabled)) is not possible. In this case, a warning will be issued. type: str version_added: 10.0.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Jenkins node using token authentication community.general.jenkins_node: url: http://localhost:8080 @@ -105,12 +100,11 @@ EXAMPLES = ''' community.general.jenkins_node: name: my-node state: disabled - offline_message: > + offline_message: >- This node is offline for some reason. -''' +""" -RETURN = ''' ---- +RETURN = r""" url: description: URL used to connect to the Jenkins server. returned: success @@ -151,7 +145,7 @@ configured: description: Whether or not the Jenkins node was configured by the task. returned: success type: bool -''' +""" import sys import traceback diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py index 8834e0a2b2..2663b4fe35 100644 --- a/plugins/modules/jenkins_plugin.py +++ b/plugins/modules/jenkins_plugin.py @@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_plugin author: Jiri Tyr (@jtyr) short_description: Add or remove Jenkins plugin description: - Ansible module which helps to manage Jenkins plugins. - attributes: check_mode: support: full @@ -53,8 +51,7 @@ options: type: str description: - Desired plugin state. - - If set to V(latest), the check for new version will be performed - every time. This is suitable to keep the plugin up-to-date. + - If set to V(latest), the check for new version will be performed every time. This is suitable to keep the plugin up-to-date. 
choices: [absent, present, pinned, unpinned, enabled, disabled, latest] default: present timeout: @@ -65,12 +62,10 @@ options: updates_expiration: type: int description: - - Number of seconds after which a new copy of the C(update-center.json) - file is downloaded. This is used to avoid the need to download the - plugin to calculate its checksum when O(state=latest) is specified. - - Set it to V(0) if no cache file should be used. In that case, the - plugin file will always be downloaded to calculate its checksum when - O(state=latest) is specified. + - Number of seconds after which a new copy of the C(update-center.json) file is downloaded. This is used to avoid the + need to download the plugin to calculate its checksum when O(state=latest) is specified. + - Set it to V(0) if no cache file should be used. In that case, the plugin file will always be downloaded to calculate + its checksum when O(state=latest) is specified. default: 86400 updates_url: type: list @@ -109,12 +104,11 @@ options: type: str description: - Plugin version number. - - If this option is specified, all plugin dependencies must be installed - manually. - - It might take longer to verify that the correct version is installed. - This is especially true if a specific version number is specified. - - Quote the version to prevent the value to be interpreted as float. For - example if V(1.20) would be unquoted, it would become V(1.2). + - If this option is specified, all plugin dependencies must be installed manually. + - It might take longer to verify that the correct version is installed. This is especially true if a specific version + number is specified. + - Quote the version to prevent the value to be interpreted as float. For example if V(1.20) would be unquoted, it would + become V(1.2). with_dependencies: description: - Defines whether to install plugin dependencies. 
@@ -123,24 +117,20 @@ options: default: true notes: - - Plugin installation should be run under root or the same user which owns - the plugin files on the disk. Only if the plugin is not installed yet and - no version is specified, the API installation is performed which requires - only the Web UI credentials. - - It is necessary to notify the handler or call the M(ansible.builtin.service) module to - restart the Jenkins service after a new plugin was installed. - - Pinning works only if the plugin is installed and Jenkins service was - successfully restarted after the plugin installation. - - It is not possible to run the module remotely by changing the O(url) - parameter to point to the Jenkins server. The module must be used on the - host where Jenkins runs as it needs direct access to the plugin files. + - Plugin installation should be run under root or the same user which owns the plugin files on the disk. Only if the plugin + is not installed yet and no version is specified, the API installation is performed which requires only the Web UI credentials. + - It is necessary to notify the handler or call the M(ansible.builtin.service) module to restart the Jenkins service after + a new plugin was installed. + - Pinning works only if the plugin is installed and Jenkins service was successfully restarted after the plugin installation. + - It is not possible to run the module remotely by changing the O(url) parameter to point to the Jenkins server. The module + must be used on the host where Jenkins runs as it needs direct access to the plugin files. 
extends_documentation_fragment: - ansible.builtin.url - ansible.builtin.files - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install plugin community.general.jenkins_plugin: name: build-pipeline-plugin @@ -281,8 +271,8 @@ EXAMPLES = ''' retries: 60 delay: 5 until: > - 'status' in jenkins_service_status and - jenkins_service_status['status'] == 200 + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 when: jenkins_restart_required - name: Reset the fact @@ -305,20 +295,20 @@ EXAMPLES = ''' when: > 'enabled' in item.value with_dict: "{{ my_jenkins_plugins }}" -''' +""" -RETURN = ''' +RETURN = r""" plugin: - description: plugin name - returned: success - type: str - sample: build-pipeline-plugin + description: Plugin name. + returned: success + type: str + sample: build-pipeline-plugin state: - description: state of the target, after execution - returned: success - type: str - sample: "present" -''' + description: State of the target, after execution. + returned: success + type: str + sample: "present" +""" import hashlib import io diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py index 030c8e6fa3..0f6064eeca 100644 --- a/plugins/modules/jenkins_script.py +++ b/plugins/modules/jenkins_script.py @@ -9,17 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: James Hogarth (@hogarthj) module: jenkins_script short_description: Executes a groovy script in the jenkins instance description: - - The C(jenkins_script) module takes a script plus a dict of values - to use within the script and returns the result of the script being run. - + - The C(jenkins_script) module takes a script plus a dict of values to use within the script and returns the result of the + script being run. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -31,20 +29,18 @@ options: script: type: str description: - - The groovy script to be executed. - This gets passed as a string Template if args is defined. + - The groovy script to be executed. This gets passed as a string Template if args is defined. required: true url: type: str description: - - The jenkins server to execute the script against. The default is a local - jenkins instance that is not being proxied through a webserver. + - The jenkins server to execute the script against. The default is a local jenkins instance that is not being proxied + through a webserver. default: http://localhost:8080 validate_certs: description: - - If set to V(false), the SSL certificates will not be validated. - This should only set to V(false) used on personally controlled sites - using self-signed certificates as it avoids verifying the source site. + - If set to V(false), the SSL certificates will not be validated. This should only set to V(false) used on personally + controlled sites using self-signed certificates as it avoids verifying the source site. type: bool default: true user: @@ -58,21 +54,18 @@ options: timeout: type: int description: - - The request timeout in seconds + - The request timeout in seconds. default: 10 args: type: dict description: - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings). - notes: - - Since the script can do anything this does not report on changes. - Knowing the script is being run it's important to set changed_when - for the ansible output to be clear on any alterations made. + - Since the script can do anything this does not report on changes. Knowing the script is being run it's important to set + changed_when for the ansible output to be clear on any alterations made. 
+""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Obtaining a list of plugins community.general.jenkins_script: script: 'println(Jenkins.instance.pluginManager.plugins)' @@ -82,10 +75,10 @@ EXAMPLES = ''' - name: Setting master using a variable to hold a more complicate script ansible.builtin.set_fact: setmaster_mode: | - import jenkins.model.* - instance = Jenkins.getInstance() - instance.setMode(${jenkins_mode}) - instance.save() + import jenkins.model.* + instance = Jenkins.getInstance() + instance.setMode(${jenkins_mode}) + instance.save() - name: Use the variable as the script community.general.jenkins_script: @@ -99,16 +92,16 @@ EXAMPLES = ''' user: admin password: admin url: https://localhost - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' +RETURN = r""" output: - description: Result of script - returned: success - type: str - sample: 'Result: true' -''' + description: Result of script. + returned: success + type: str + sample: 'Result: true' +""" import json diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index 64aed7e149..cc3136c3bf 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -20,7 +20,6 @@ module: jira short_description: Create and modify issues in a JIRA instance description: - Create and modify issues in a JIRA instance. - extends_documentation_fragment: - community.general.attributes @@ -36,28 +35,24 @@ options: required: true description: - Base URI for the JIRA instance. - operation: type: str required: true - aliases: [ command ] - choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ] + aliases: [command] + choices: [attach, comment, create, edit, fetch, link, search, transition, update, worklog] description: - The operation to perform. - V(worklog) was added in community.general 6.5.0. - username: type: str description: - The username to log-in with. 
- Must be used with O(password). Mutually exclusive with O(token). - password: type: str description: - The password to log-in with. - - Must be used with O(username). Mutually exclusive with O(token). - + - Must be used with O(username). Mutually exclusive with O(token). token: type: str description: @@ -70,56 +65,54 @@ options: required: false description: - The project for this operation. Required for issue creation. - summary: type: str required: false description: - - The issue summary, where appropriate. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The issue summary, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. description: type: str required: false description: - - The issue description, where appropriate. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The issue description, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. issuetype: type: str required: false description: - - The issue type, for issue creation. - + - The issue type, for issue creation. issue: type: str required: false description: - - An existing issue key to operate on. + - An existing issue key to operate on. aliases: ['ticket'] comment: type: str required: false description: - - The comment text to add. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The comment text to add. + - Note that JIRA may not allow changing field values on specific transitions or states. comment_visibility: type: dict description: - - Used to specify comment comment visibility. - - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details. + - Used to specify comment comment visibility. 
+ - See + U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) + for details. suboptions: type: description: - - Use type to specify which of the JIRA visibility restriction types will be used. + - Use type to specify which of the JIRA visibility restriction types will be used. type: str required: true choices: [group, role] value: description: - - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role. + - Use value to specify value corresponding to the type of visibility restriction. For example name of the group + or role. type: str required: true version_added: '3.2.0' @@ -128,63 +121,59 @@ options: type: str required: false description: - - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name. - + - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name. assignee: type: str required: false description: - - Sets the the assignee when O(operation) is V(create), V(transition), or V(edit). - - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - Sets the the assignee when O(operation) is V(create), V(transition), or V(edit). + - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead. + - Note that JIRA may not allow changing field values on specific transitions or states. account_id: type: str description: - - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit). - - Note that JIRA may not allow changing field values on specific transitions or states. 
+ - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit). + - Note that JIRA may not allow changing field values on specific transitions or states. version_added: 2.5.0 linktype: type: str required: false description: - - Set type of link, when action 'link' selected. - + - Set type of link, when action 'link' selected. inwardissue: type: str required: false description: - - Set issue from which link will be created. - + - Set issue from which link will be created. outwardissue: type: str required: false description: - - Set issue to which link will be created. - + - Set issue to which link will be created. fields: type: dict required: false description: - - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API - (possibly after merging with other required data, as when passed to create). See examples for more information, - and the JIRA REST API for the structure required for various fields. - - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example. - - Note that JIRA may not allow changing field values on specific transitions or states. + - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly + after merging with other required data, as when passed to create). See examples for more information, and the JIRA + REST API for the structure required for various fields. + - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add + JIRA properties for example. + - Note that JIRA may not allow changing field values on specific transitions or states. default: {} jql: required: false description: - - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'. + - Query JIRA in JQL Syntax, for example V("CMDB Hostname" = test.example.com). 
type: str version_added: '0.2.0' maxresults: required: false description: - - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used. - - Used when O(operation=search) only, ignored otherwise. + - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used. + - Used when O(operation=search) only, ignored otherwise. type: int version_added: '0.2.0' @@ -198,7 +187,7 @@ options: validate_certs: required: false description: - - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates) + - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates). default: true type: bool @@ -212,27 +201,24 @@ options: required: true type: path description: - - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified, - the filename to use for the attachment. + - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified, the filename + to use for the attachment. content: type: str description: - - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will be - used instead. + - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will + be used instead. mimetype: type: str description: - - The MIME type to supply for the upload. If not specified, best-effort detection will be - done. - + - The MIME type to supply for the upload. If not specified, best-effort detection will be done. notes: - - "Currently this only works with basic-auth, or tokens." - - "To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password)." - + - Currently this only works with basic-auth, or tokens. + - To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password). 
author: -- "Steve Smith (@tarka)" -- "Per Abildgaard Toft (@pertoft)" -- "Brandon McNama (@DWSR)" + - "Steve Smith (@tarka)" + - "Per Abildgaard Toft (@pertoft)" + - "Brandon McNama (@DWSR)" """ EXAMPLES = r""" @@ -249,8 +235,8 @@ EXAMPLES = r""" issuetype: Task args: fields: - customfield_13225: "test" - customfield_12931: {"value": "Test"} + customfield_13225: "test" + customfield_12931: {"value": "Test"} register: issue - name: Comment on issue @@ -362,9 +348,9 @@ EXAMPLES = r""" operation: edit args: fields: - labels: - - autocreated - - ansible + labels: + - autocreated + - ansible # Updating a field using operations: add, set & remove - name: Change the value of a Select dropdown @@ -376,8 +362,8 @@ EXAMPLES = r""" operation: update args: fields: - customfield_12931: [ {'set': {'value': 'Virtual'}} ] - customfield_13820: [ {'set': {'value':'Manually'}} ] + customfield_12931: ['set': {'value': 'Virtual'}] + customfield_13820: ['set': {'value': 'Manually'}] register: cmdb_issue delegate_to: localhost @@ -406,7 +392,7 @@ EXAMPLES = r""" jql: project=cmdb AND cf[13225]="test" args: fields: - lastViewed: null + lastViewed: register: issue - name: Create a unix account for the reporter From a9fca563746c361f7fcb558c257412a9793f7714 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 27 Dec 2024 01:40:55 +1300 Subject: [PATCH 415/482] ma*.py: normalize docs (#9389) * ma*.py: normalize docs * Update plugins/modules/matrix.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- plugins/modules/macports.py | 88 +++--- plugins/modules/mail.py | 111 ++++---- plugins/modules/make.py | 13 +- plugins/modules/manageiq_alert_profiles.py | 32 +-- plugins/modules/manageiq_alerts.py | 59 ++-- plugins/modules/manageiq_group.py | 102 ++++--- plugins/modules/manageiq_policies.py | 27 +- plugins/modules/manageiq_policies_info.py | 19 +- plugins/modules/manageiq_provider.py | 94 +++---- 
plugins/modules/manageiq_tags.py | 51 ++-- plugins/modules/manageiq_tags_info.py | 19 +- plugins/modules/manageiq_tenant.py | 70 ++--- plugins/modules/manageiq_user.py | 32 +-- plugins/modules/mas.py | 80 +++--- plugins/modules/matrix.py | 91 +++--- plugins/modules/mattermost.py | 39 ++- plugins/modules/maven_artifact.py | 308 ++++++++++----------- 17 files changed, 596 insertions(+), 639 deletions(-) diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py index cd620687d7..3777b86c2f 100644 --- a/plugins/modules/macports.py +++ b/plugins/modules/macports.py @@ -12,54 +12,54 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: macports author: "Jimmy Tang (@jcftang)" short_description: Package manager for MacPorts description: - - Manages MacPorts packages (ports) + - Manages MacPorts packages (ports). extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - A list of port names. - aliases: ['port'] - type: list - elements: str - selfupdate: - description: - - Update Macports and the ports tree, either prior to installing ports or as a separate step. - - Equivalent to running C(port selfupdate). - aliases: ['update_cache', 'update_ports'] - default: false - type: bool - state: - description: - - Indicates the desired state of the port. - choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed'] - default: present - type: str - upgrade: - description: - - Upgrade all outdated ports, either prior to installing ports or as a separate step. - - Equivalent to running C(port upgrade outdated). - default: false - type: bool - variant: - description: - - A port variant specification. 
- - 'O(variant) is only supported with O(state=installed) and O(state=present).' - aliases: ['variants'] - type: str -''' -EXAMPLES = ''' + name: + description: + - A list of port names. + aliases: ['port'] + type: list + elements: str + selfupdate: + description: + - Update Macports and the ports tree, either prior to installing ports or as a separate step. + - Equivalent to running C(port selfupdate). + aliases: ['update_cache', 'update_ports'] + default: false + type: bool + state: + description: + - Indicates the desired state of the port. + choices: ['present', 'absent', 'active', 'inactive', 'installed', 'removed'] + default: present + type: str + upgrade: + description: + - Upgrade all outdated ports, either prior to installing ports or as a separate step. + - Equivalent to running C(port upgrade outdated). + default: false + type: bool + variant: + description: + - A port variant specification. + - O(variant) is only supported with O(state=installed) and O(state=present). + aliases: ['variants'] + type: str +""" + +EXAMPLES = r""" - name: Install the foo port community.general.macports: name: foo @@ -74,8 +74,8 @@ EXAMPLES = ''' name: "{{ ports }}" vars: ports: - - foo - - foo-tools + - foo + - foo-tools - name: Update Macports and the ports tree, then upgrade all outdated ports community.general.macports: @@ -101,7 +101,7 @@ EXAMPLES = ''' community.general.macports: name: foo state: inactive -''' +""" import re diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py index 1916c140c3..0cca8646ca 100644 --- a/plugins/modules/mail.py +++ b/plugins/modules/mail.py @@ -9,27 +9,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) module: mail short_description: Send an email description: -- This module is useful for sending emails from playbooks. -- One may wonder why automate sending emails? 
In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. -- If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make them perform their - part of the bargain is an elegant way to put the responsibility in - someone else's lap. -- Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. + - This module is useful for sending emails from playbooks. + - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot be automated, either + because you lack the authority to make it so, or because not everyone agrees to a common approach. + - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to make them perform their + part of the bargain is an elegant way to put the responsibility in someone else's lap. + - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action has been (successfully) + taken. extends_documentation_fragment: -- community.general.attributes + - community.general.attributes attributes: check_mode: support: none @@ -38,106 +32,105 @@ attributes: options: sender: description: - - The email-address the mail is sent from. May contain address and phrase. + - The email-address the mail is sent from. May contain address and phrase. type: str default: root - aliases: [ from ] + aliases: [from] to: description: - - The email-address(es) the mail is being sent to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being sent to. + - This is a list, which may contain address and phrase portions. 
type: list elements: str default: root - aliases: [ recipients ] + aliases: [recipients] cc: description: - - The email-address(es) the mail is being copied to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being copied to. + - This is a list, which may contain address and phrase portions. type: list elements: str default: [] bcc: description: - - The email-address(es) the mail is being 'blind' copied to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being 'blind' copied to. + - This is a list, which may contain address and phrase portions. type: list elements: str default: [] subject: description: - - The subject of the email being sent. + - The subject of the email being sent. required: true type: str - aliases: [ msg ] + aliases: [msg] body: description: - - The body of the email being sent. + - The body of the email being sent. type: str username: description: - - If SMTP requires username. + - If SMTP requires username. type: str password: description: - - If SMTP requires password. + - If SMTP requires password. type: str host: description: - - The mail server. + - The mail server. type: str default: localhost port: description: - - The mail server port. - - This must be a valid integer between 1 and 65534 + - The mail server port. + - This must be a valid integer between V(1) and V(65534). type: int default: 25 attach: description: - - A list of pathnames of files to attach to the message. - - Attached files will have their content-type set to C(application/octet-stream). + - A list of pathnames of files to attach to the message. + - Attached files will have their content-type set to C(application/octet-stream). type: list elements: path default: [] headers: description: - - A list of headers which should be added to the message. - - Each individual header is specified as C(header=value) (see example below). 
+ - A list of headers which should be added to the message. + - Each individual header is specified as V(header=value) (see example below). type: list elements: str default: [] charset: description: - - The character set of email being sent. + - The character set of email being sent. type: str default: utf-8 subtype: description: - - The minor mime type, can be either V(plain) or V(html). - - The major type is always V(text). + - The minor mime type, can be either V(plain) or V(html). + - The major type is always V(text). type: str - choices: [ html, plain ] + choices: [html, plain] default: plain secure: description: - - If V(always), the connection will only send email if the connection is Encrypted. - If the server doesn't accept the encrypted connection it will fail. - - If V(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. - - If V(never), the connection will not attempt to setup a secure SSL/TLS session, before sending - - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. - If it is unable to do so it will fail. + - If V(always), the connection will only send email if the connection is Encrypted. If the server does not accept the encrypted connection + it will fail. + - If V(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. + - If V(never), the connection will not attempt to setup a secure SSL/TLS session, before sending. + - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. If it is unable to do so it will fail. type: str - choices: [ always, never, starttls, try ] + choices: [always, never, starttls, try] default: try timeout: description: - - Sets the timeout in seconds for connection attempts. + - Sets the timeout in seconds for connection attempts. type: int default: 20 ehlohost: description: - - Allows for manual specification of host for EHLO. 
+ - Allows for manual specification of host for EHLO. type: str version_added: 3.8.0 message_id_domain: @@ -147,9 +140,9 @@ options: type: str default: ansible version_added: 8.2.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example playbook sending mail to root community.general.mail: subject: System {{ ansible_hostname }} has been successfully provisioned. @@ -174,15 +167,15 @@ EXAMPLES = r''' body: Hello, this is an e-mail. I hope you like it ;-) from: jane@example.net (Jane Jolie) to: - - John Doe - - Suzie Something + - John Doe + - Suzie Something cc: Charlie Root attach: - - /etc/group - - /tmp/avatar2.png + - /etc/group + - /tmp/avatar2.png headers: - - Reply-To=john@example.com - - X-Special="Something or other" + - Reply-To=john@example.com + - X-Special="Something or other" charset: us-ascii delegate_to: localhost @@ -222,7 +215,7 @@ EXAMPLES = r''' subject: Ansible-report body: System {{ ansible_hostname }} has been successfully provisioned. secure: starttls -''' +""" import os import smtplib diff --git a/plugins/modules/make.py b/plugins/modules/make.py index 39392afca6..a574560f7f 100644 --- a/plugins/modules/make.py +++ b/plugins/modules/make.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: make short_description: Run targets in a Makefile requirements: @@ -65,9 +64,9 @@ options: type: list elements: str version_added: 7.2.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Build the default target community.general.make: chdir: /home/ubuntu/cool-project @@ -103,9 +102,9 @@ EXAMPLES = r''' # The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line: TARGET: arm64 TARGET_ARCH: aarch64 -''' +""" -RETURN = r''' +RETURN = r""" chdir: description: - The value of the module parameter O(chdir). 
@@ -143,7 +142,7 @@ targets: type: str returned: success version_added: 7.2.0 -''' +""" from ansible.module_utils.six import iteritems from ansible.module_utils.six.moves import shlex_quote diff --git a/plugins/modules/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py index eb6424bcdd..33ca05df3b 100644 --- a/plugins/modules/manageiq_alert_profiles.py +++ b/plugins/modules/manageiq_alert_profiles.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_alert_profiles short_description: Configuration of alert profiles for ManageIQ @@ -20,7 +19,6 @@ extends_documentation_fragment: author: Elad Alfassa (@elad661) description: - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. - attributes: check_mode: support: none @@ -31,8 +29,8 @@ options: state: type: str description: - - absent - alert profile should not exist, - - present - alert profile should exist, + - V(absent) - alert profile should not exist, + - V(present) - alert profile should exist. choices: ['absent', 'present'] default: 'present' name: @@ -43,23 +41,21 @@ options: resource_type: type: str description: - - The resource type for the alert profile in ManageIQ. Required when state is "present". - choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', - 'ExtManagementSystem', 'MiddlewareServer'] + - The resource type for the alert profile in ManageIQ. Required when O(state=present). + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] alerts: type: list elements: str description: - List of alert descriptions to assign to this profile. - - Required if state is "present" + - Required if O(state=present). notes: type: str description: - - Optional notes for this profile + - Optional notes for this profile. 
+""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Add an alert profile to ManageIQ community.general.manageiq_alert_profiles: state: present @@ -72,7 +68,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete an alert profile from ManageIQ community.general.manageiq_alert_profiles: @@ -82,11 +78,11 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py index 53f40fb00c..9dd50af846 100644 --- a/plugins/modules/manageiq_alerts.py +++ b/plugins/modules/manageiq_alerts.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_alerts short_description: Configuration of alerts in ManageIQ @@ -20,7 +19,6 @@ extends_documentation_fragment: author: Elad Alfassa (@elad661) description: - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ. - attributes: check_mode: support: none @@ -31,8 +29,8 @@ options: state: type: str description: - - absent - alert should not exist, - - present - alert should exist, + - V(absent) - alert should not exist, + - V(present) - alert should exist. required: false choices: ['absent', 'present'] default: 'present' @@ -44,9 +42,8 @@ options: resource_type: type: str description: - - The entity type for the alert in ManageIQ. Required when state is "present". 
- choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', - 'ExtManagementSystem', 'MiddlewareServer'] + - The entity type for the alert in ManageIQ. Required when O(state=present). + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] expression_type: type: str description: @@ -58,20 +55,18 @@ options: description: - The alert expression for ManageIQ. - Can either be in the "Miq Expression" format or the "Hash Expression format". - - Required if state is "present". + - Required if O(state=present). enabled: description: - - Enable or disable the alert. Required if state is "present". + - Enable or disable the alert. Required if O(state=present). type: bool options: type: dict description: - - Additional alert options, such as notification type and frequency + - Additional alert options, such as notification type and frequency. +""" - -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Add an alert with a "hash expression" to ManageIQ community.general.manageiq_alerts: state: present @@ -83,15 +78,15 @@ EXAMPLES = ''' from: "example@example.com" resource_type: ContainerNode expression: - eval_method: hostd_log_threshold - mode: internal - options: {} + eval_method: hostd_log_threshold + mode: internal + options: {} enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! 
- name: Add an alert with a "miq expression" to ManageIQ community.general.manageiq_alerts: @@ -105,20 +100,20 @@ EXAMPLES = ''' resource_type: Vm expression_type: miq expression: - and: - - CONTAINS: - tag: Vm.managed-environment - value: prod - - not: - CONTAINS: - tag: Vm.host.managed-environment - value: prod + and: + - CONTAINS: + tag: Vm.managed-environment + value: prod + - not: + CONTAINS: + tag: Vm.host.managed-environment + value: prod enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete an alert from ManageIQ community.general.manageiq_alerts: @@ -128,11 +123,11 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py index e060b9a01a..9858dd5947 100644 --- a/plugins/modules/manageiq_group.py +++ b/plugins/modules/manageiq_group.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_group short_description: Management of groups in ManageIQ @@ -33,70 +32,69 @@ options: state: type: str description: - - absent - group should not exist, present - group should be. + - V(absent) - group should not exist, + - V(present) - group should exist. choices: ['absent', 'present'] default: 'present' description: type: str description: - - The group description. + - The group description. 
required: true - default: null role_id: type: int description: - - The the group role id + - The the group role id. required: false - default: null role: type: str description: - - The the group role name - - The O(role_id) has precedence over the O(role) when supplied. + - The the group role name. + - The O(role_id) has precedence over the O(role) when supplied. required: false - default: null + default: tenant_id: type: int description: - - The tenant for the group identified by the tenant id. + - The tenant for the group identified by the tenant id. required: false - default: null + default: tenant: type: str description: - - The tenant for the group identified by the tenant name. - - The O(tenant_id) has precedence over the O(tenant) when supplied. - - Tenant names are case sensitive. + - The tenant for the group identified by the tenant name. + - The O(tenant_id) has precedence over the O(tenant) when supplied. + - Tenant names are case sensitive. required: false - default: null + default: managed_filters: - description: The tag values per category + description: The tag values per category. type: dict required: false - default: null + default: managed_filters_merge_mode: type: str description: - - In merge mode existing categories are kept or updated, new categories are added. - - In replace mode all categories will be replaced with the supplied O(managed_filters). - choices: [ merge, replace ] + - In merge mode existing categories are kept or updated, new categories are added. + - In replace mode all categories will be replaced with the supplied O(managed_filters). + choices: [merge, replace] default: replace belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder + description: A list of strings with a reference to the allowed host, cluster or folder. 
type: list elements: str required: false - default: null + default: belongsto_filters_merge_mode: type: str description: - - In merge mode existing settings are merged with the supplied O(belongsto_filters). - - In replace mode current values are replaced with the supplied O(belongsto_filters). - choices: [ merge, replace ] + - In merge mode existing settings are merged with the supplied O(belongsto_filters). + - In replace mode current values are replaced with the supplied O(belongsto_filters). + choices: [merge, replace] default: replace -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant' community.general.manageiq_group: description: 'MyGroup-user' @@ -106,7 +104,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4 community.general.manageiq_group: @@ -117,33 +115,33 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: - - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. - - Apply 3 prov_max_cpu and 2 department tags to the group. - - Limit access to a cluster for the group. + - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. + - Apply 3 prov_max_cpu and 2 department tags to the group. + - Limit access to a cluster for the group. 
community.general.manageiq_group: description: 'MyGroup-user' role: 'EvmRole-user' tenant: my_tenant managed_filters: prov_max_cpu: - - '1' - - '2' - - '4' + - '1' + - '2' + - '4' department: - - defense - - engineering + - defense + - engineering managed_filters_merge_mode: replace belongsto_filters: - - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" + - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" belongsto_filters_merge_mode: merge manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete a group in ManageIQ community.general.manageiq_group: @@ -161,53 +159,53 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' -''' +""" -RETURN = ''' +RETURN = r""" group: description: The group. returned: success type: complex contains: description: - description: The group description + description: The group description. returned: success type: str id: - description: The group id + description: The group id. returned: success type: int group_type: - description: The group type, system or user + description: The group type, system or user. returned: success type: str role: - description: The group role name + description: The group role name. returned: success type: str tenant: - description: The group tenant name + description: The group tenant name. returned: success type: str managed_filters: - description: The tag values per category + description: The tag values per category. returned: success type: dict belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder + description: A list of strings with a reference to the allowed host, cluster or folder. 
returned: success type: list created_on: - description: Group creation date + description: Group creation date. returned: success type: str sample: "2018-08-12T08:37:55+00:00" updated_on: - description: Group update date + description: Group update date. returned: success type: int sample: "2018-08-12T08:37:55+00:00" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py index f2101ad28b..e53388f293 100644 --- a/plugins/modules/manageiq_policies.py +++ b/plugins/modules/manageiq_policies.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_policies short_description: Management of resource policy_profiles in ManageIQ @@ -21,7 +20,6 @@ extends_documentation_fragment: author: Daniel Korn (@dkorn) description: - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. - attributes: check_mode: support: none @@ -33,7 +31,7 @@ options: type: str description: - V(absent) - policy_profiles should not exist, - - V(present) - policy_profiles should exist, + - V(present) - policy_profiles should exist. choices: ['absent', 'present'] default: 'present' policy_profiles: @@ -47,9 +45,8 @@ options: description: - The type of the resource to which the profile should be [un]assigned. 
required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] resource_name: type: str description: @@ -61,9 +58,9 @@ options: - The ID of the resource to which the profile should be [un]assigned. - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. version_added: 2.2.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Assign new policy_profile for a provider in ManageIQ community.general.manageiq_policies: resource_name: 'EngLab' @@ -74,7 +71,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Unassign a policy_profile for a provider in ManageIQ community.general.manageiq_policies: @@ -87,13 +84,13 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' +RETURN = r""" manageiq_policies: description: - - List current policy_profile and policies for a provider in ManageIQ + - List current policy_profile and policies for a provider in ManageIQ. 
returned: always type: dict sample: '{ @@ -122,7 +119,7 @@ manageiq_policies: } ] }' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py index fda7dcadfe..f4235203ab 100644 --- a/plugins/modules/manageiq_policies_info.py +++ b/plugins/modules/manageiq_policies_info.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_policies_info version_added: 5.8.0 @@ -24,16 +23,14 @@ extends_documentation_fragment: author: Alexei Znamensky (@russoz) description: - The manageiq_policies module supports listing policy_profiles in ManageIQ. - options: resource_type: type: str description: - The type of the resource to obtain the profile for. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] resource_name: type: str description: @@ -44,9 +41,9 @@ options: description: - The ID of the resource to obtain the profile for. - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List current policy_profile and policies for a provider in ManageIQ community.general.manageiq_policies_info: resource_name: 'EngLab' @@ -56,9 +53,9 @@ EXAMPLES = ''' username: 'admin' password: 'smartvm' register: result -''' +""" -RETURN = ''' +RETURN = r""" profiles: description: - List current policy_profile and policies for a provider in ManageIQ. 
@@ -78,7 +75,7 @@ profiles: name: schedule compliance after smart state analysis profile_description: OpenSCAP profile profile_name: openscap profile -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index 35c73a38b3..98677c7beb 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: manageiq_provider short_description: Management of provider in ManageIQ extends_documentation_fragment: @@ -19,7 +19,6 @@ extends_documentation_fragment: author: Daniel Korn (@dkorn) description: - The manageiq_provider module supports adding, updating, and deleting provider in ManageIQ. - attributes: check_mode: support: none @@ -30,7 +29,9 @@ options: state: type: str description: - - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed + - V(absent) - provider should not exist, + - V(present) - provider should be present, + - V(refresh) - provider will be refreshed. choices: ['absent', 'present', 'refresh'] default: 'present' name: @@ -47,30 +48,30 @@ options: default: 'default' provider_region: type: str - description: The provider region name to connect to (e.g. AWS region for Amazon). + description: The provider region name to connect to (for example AWS region for Amazon). host_default_vnc_port_start: type: str - description: The first port in the host VNC range. defaults to None. + description: The first port in the host VNC range. host_default_vnc_port_end: type: str - description: The last port in the host VNC range. defaults to None. + description: The last port in the host VNC range. 
subscription: type: str - description: Microsoft Azure subscription ID. defaults to None. + description: Microsoft Azure subscription ID. project: type: str - description: Google Compute Engine Project ID. defaults to None. + description: Google Compute Engine Project ID. azure_tenant_id: type: str description: Tenant ID. defaults to None. - aliases: [ keystone_v3_domain_id ] + aliases: [keystone_v3_domain_id] tenant_mapping_enabled: type: bool default: false - description: Whether to enable mapping of existing tenants. defaults to False. + description: Whether to enable mapping of existing tenants. api_version: type: str - description: The OpenStack Keystone API version. defaults to None. + description: The OpenStack Keystone API version. choices: ['v2', 'v3'] provider: @@ -79,32 +80,32 @@ options: suboptions: hostname: type: str - description: The provider's api hostname. + description: The provider's API hostname. required: true port: type: int - description: The provider's api port. + description: The provider's API port. userid: type: str - description: Provider's api endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. password: type: str - description: Provider's api endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. auth_key: type: str - description: Provider's api endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. validate_certs: - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). type: bool default: true - aliases: [ verify_ssl ] + aliases: [verify_ssl] security_protocol: type: str - description: How SSL certificates should be used for HTTPS requests. defaults to None. 
- choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + description: How SSL certificates should be used for HTTPS requests. + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] certificate_authority: type: str - description: The CA bundle string with custom certificates. defaults to None. + description: The CA bundle string with custom certificates. path: type: str description: @@ -125,39 +126,38 @@ options: type: str description: - TODO needs documentation. - metrics: description: Metrics endpoint connection information. type: dict suboptions: hostname: type: str - description: The provider's api hostname. + description: The provider's API hostname. required: true port: type: int - description: The provider's api port. + description: The provider's API port. userid: type: str - description: Provider's api endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. password: type: str - description: Provider's api endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. auth_key: type: str - description: Provider's api endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. validate_certs: - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). type: bool default: true - aliases: [ verify_ssl ] + aliases: [verify_ssl] security_protocol: type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. 
+ choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. certificate_authority: type: str - description: The CA bundle string with custom certificates. defaults to None. + description: The CA bundle string with custom certificates. path: type: str description: Database name for oVirt metrics. Defaults to V(ovirt_engine_history). @@ -177,35 +177,34 @@ options: type: str description: - TODO needs documentation. - alerts: description: Alerts endpoint connection information. type: dict suboptions: hostname: type: str - description: The provider's api hostname. + description: The provider's API hostname. required: true port: type: int - description: The provider's api port. + description: The provider's API port. userid: type: str - description: Provider's api endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. defaults to None. password: type: str - description: Provider's api endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. defaults to None. auth_key: type: str - description: Provider's api endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. defaults to None. validate_certs: type: bool description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. default: true - aliases: [ verify_ssl ] + aliases: [verify_ssl] security_protocol: type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl'] + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] description: How SSL certificates should be used for HTTPS requests. defaults to None. 
certificate_authority: type: str @@ -230,7 +229,6 @@ options: type: str description: - TODO needs documentation. - ssh_keypair: description: SSH key pair used for SSH connections to all hosts in this provider. type: dict @@ -250,10 +248,10 @@ options: - Whether certificates should be verified for connections. type: bool default: true - aliases: [ verify_ssl ] + aliases: [verify_ssl] security_protocol: type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl'] + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] description: - TODO needs documentation. certificate_authority: @@ -288,9 +286,9 @@ options: type: int description: - TODO needs documentation. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new provider in ManageIQ ('Hawkular' metrics) community.general.manageiq_provider: name: 'EngLab' @@ -507,10 +505,10 @@ EXAMPLES = ''' hostname: 'gce.example.com' auth_key: 'google_json_key' validate_certs: 'false' -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py index 3ab5eca4f8..bae59353cf 100644 --- a/plugins/modules/manageiq_tags.py +++ b/plugins/modules/manageiq_tags.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_tags short_description: Management of resource tags in ManageIQ @@ -21,7 +20,6 @@ extends_documentation_fragment: author: Daniel Korn (@dkorn) description: - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ. - attributes: check_mode: support: none @@ -32,7 +30,7 @@ options: state: type: str description: - - V(absent) - tags should not exist. 
+ - V(absent) - tags should not exist, - V(present) - tags should exist. choices: ['absent', 'present'] default: 'present' @@ -47,9 +45,8 @@ options: description: - The relevant resource type in manageiq. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] resource_name: type: str description: @@ -61,38 +58,38 @@ options: - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. type: int version_added: 2.2.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new tags for a provider in ManageIQ. community.general.manageiq_tags: resource_name: 'EngLab' resource_type: 'provider' tags: - - category: environment - name: prod - - category: owner - name: prod_ops + - category: environment + name: prod + - category: owner + name: prod_ops manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when connecting to localhost! + validate_certs: false # only do this when connecting to localhost! - name: Create new tags for a provider in ManageIQ. community.general.manageiq_tags: resource_id: 23000000790497 resource_type: 'provider' tags: - - category: environment - name: prod - - category: owner - name: prod_ops + - category: environment + name: prod + - category: owner + name: prod_ops manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when connecting to localhost! + validate_certs: false # only do this when connecting to localhost! - name: Remove tags for a provider in ManageIQ. 
community.general.manageiq_tags: @@ -100,19 +97,19 @@ EXAMPLES = ''' resource_name: 'EngLab' resource_type: 'provider' tags: - - category: environment - name: prod - - category: owner - name: prod_ops + - category: environment + name: prod + - category: owner + name: prod_ops manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when connecting to localhost! -''' + validate_certs: false # only do this when connecting to localhost! +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ( diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py index 75e111540b..5d32104e7a 100644 --- a/plugins/modules/manageiq_tags_info.py +++ b/plugins/modules/manageiq_tags_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_tags_info version_added: 5.8.0 short_description: Retrieve resource tags in ManageIQ @@ -22,16 +21,14 @@ extends_documentation_fragment: author: Alexei Znamensky (@russoz) description: - This module supports retrieving resource tags from ManageIQ. - options: resource_type: type: str description: - The relevant resource type in ManageIQ. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] resource_name: type: str description: @@ -42,9 +39,9 @@ options: - The ID of the resource at which tags will be controlled. - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. 
type: int -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List current tags for a provider in ManageIQ. community.general.manageiq_tags_info: resource_name: 'EngLab' @@ -54,15 +51,15 @@ EXAMPLES = ''' username: 'admin' password: 'smartvm' register: result -''' +""" -RETURN = ''' +RETURN = r""" tags: description: List of tags associated with the resource. returned: on success type: list elements: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ( diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py index a5a56191e7..4700e46356 100644 --- a/plugins/modules/manageiq_tenant.py +++ b/plugins/modules/manageiq_tenant.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_tenant short_description: Management of tenants in ManageIQ @@ -31,7 +30,8 @@ options: state: type: str description: - - absent - tenant should not exist, present - tenant should be. + - V(absent) - tenant should not exist, + - V(present) - tenant should be. choices: ['absent', 'present'] default: 'present' name: @@ -39,42 +39,42 @@ options: description: - The tenant name. required: true - default: null + default: description: type: str description: - - The tenant description. + - The tenant description. required: true - default: null + default: parent_id: type: int description: - - The id of the parent tenant. If not supplied the root tenant is used. - - The O(parent_id) takes president over O(parent) when supplied + - The id of the parent tenant. If not supplied the root tenant is used. + - The O(parent_id) takes president over O(parent) when supplied. required: false - default: null + default: parent: type: str description: - - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used. 
+ - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used. required: false - default: null + default: quotas: type: dict description: - - The tenant quotas. - - All parameters case sensitive. - - 'Valid attributes are:' - - ' - C(cpu_allocated) (int): use null to remove the quota.' - - ' - C(mem_allocated) (GB): use null to remove the quota.' - - ' - C(storage_allocated) (GB): use null to remove the quota.' - - ' - C(vms_allocated) (int): use null to remove the quota.' - - ' - C(templates_allocated) (int): use null to remove the quota.' + - The tenant quotas. + - All parameters case sensitive. + - 'Valid attributes are:' + - '- V(cpu_allocated) (int): use null to remove the quota.' + - '- V(mem_allocated) (GB): use null to remove the quota.' + - '- V(storage_allocated) (GB): use null to remove the quota.' + - '- V(vms_allocated) (int): use null to remove the quota.' + - '- V(templates_allocated) (int): use null to remove the quota.' required: false default: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the root tenant in ManageIQ community.general.manageiq_tenant: name: 'My Company' @@ -83,7 +83,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Create a tenant in ManageIQ community.general.manageiq_tenant: @@ -94,7 +94,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete a tenant in ManageIQ community.general.manageiq_tenant: @@ -105,7 +105,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! 
+ validate_certs: false # only do this when you trust the network! - name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated community.general.manageiq_tenant: @@ -114,12 +114,12 @@ EXAMPLES = ''' quotas: - cpu_allocated: 100 - mem_allocated: 50 - - vms_allocated: null + - vms_allocated: manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete a tenant in ManageIQ using a token @@ -130,39 +130,39 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' +RETURN = r""" tenant: description: The tenant. returned: success type: complex contains: id: - description: The tenant id + description: The tenant id. returned: success type: int name: - description: The tenant name + description: The tenant name. returned: success type: str description: - description: The tenant description + description: The tenant description. returned: success type: str parent_id: - description: The id of the parent tenant + description: The id of the parent tenant. returned: success type: int quotas: - description: List of tenant quotas + description: List of tenant quotas. 
returned: success type: list sample: cpu_allocated: 100 mem_allocated: 50 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py index 0d8a81984f..a4d5c21dfc 100644 --- a/plugins/modules/manageiq_user.py +++ b/plugins/modules/manageiq_user.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_user short_description: Management of users in ManageIQ @@ -20,7 +19,6 @@ extends_documentation_fragment: author: Daniel Korn (@dkorn) description: - The manageiq_user module supports adding, updating and deleting users in ManageIQ. - attributes: check_mode: support: none @@ -31,7 +29,8 @@ options: state: type: str description: - - absent - user should not exist, present - user should be. + - V(absent) - user should not exist, + - V(present) - user should be. choices: ['absent', 'present'] default: 'present' userid: @@ -60,10 +59,11 @@ options: default: always choices: ['always', 'on_create'] description: - - V(always) will update passwords unconditionally. V(on_create) will only set the password for a newly created user. -''' + - V(always) will update passwords unconditionally. + - V(on_create) will only set the password for a newly created user. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new user in ManageIQ community.general.manageiq_user: userid: 'jdoe' @@ -75,7 +75,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! 
- name: Create a new user in ManageIQ using a token community.general.manageiq_user: @@ -87,7 +87,7 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete a user in ManageIQ community.general.manageiq_user: @@ -97,7 +97,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Delete a user in ManageIQ using a token community.general.manageiq_user: @@ -106,7 +106,7 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Update email of user in ManageIQ community.general.manageiq_user: @@ -116,7 +116,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! - name: Update email of user in ManageIQ using a token community.general.manageiq_user: @@ -125,11 +125,11 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: false # only do this when you trust the network! -''' + validate_certs: false # only do this when you trust the network! 
+""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/mas.py b/plugins/modules/mas.py index 8bb80840ca..3659c97636 100644 --- a/plugins/modules/mas.py +++ b/plugins/modules/mas.py @@ -10,54 +10,54 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: mas short_description: Manage Mac App Store applications with mas-cli description: - - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). + - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). version_added: '0.2.0' author: - - Michael Heap (@mheap) - - Lukas Bestle (@lukasbestle) + - Michael Heap (@mheap) + - Lukas Bestle (@lukasbestle) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - id: - description: - - The Mac App Store identifier of the app(s) you want to manage. - - This can be found by running C(mas search APP_NAME) on your machine. - type: list - elements: int - state: - description: - - Desired state of the app installation. - - The V(absent) value requires root permissions, also see the examples. - type: str - choices: - - absent - - latest - - present - default: present - upgrade_all: - description: - - Upgrade all installed Mac App Store apps. - type: bool - default: false - aliases: ["upgrade"] + id: + description: + - The Mac App Store identifier of the app(s) you want to manage. + - This can be found by running C(mas search APP_NAME) on your machine. + type: list + elements: int + state: + description: + - Desired state of the app installation. 
+ - The V(absent) value requires root permissions, also see the examples. + type: str + choices: + - absent + - latest + - present + default: present + upgrade_all: + description: + - Upgrade all installed Mac App Store apps. + type: bool + default: false + aliases: ["upgrade"] requirements: - - macOS 10.11+ - - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" - - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). - - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+. - - Users need to sign in via the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417). -''' + - macOS 10.11 or higher. + - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" + - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). + - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+. + - Users need to sign in to the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Keynote community.general.mas: id: 409183694 @@ -99,9 +99,9 @@ EXAMPLES = ''' id: 413857545 state: absent become: true # Uninstallation requires root permissions -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule import os diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py index 0b419c8d93..8a9fcf175c 100644 --- a/plugins/modules/matrix.py +++ b/plugins/modules/matrix.py @@ -8,58 +8,57 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: "Jan Christian Grünhage (@jcgruenhage)" module: matrix short_description: Send notifications to matrix description: - - This module sends html formatted notifications to matrix rooms. + - This module sends html formatted notifications to matrix rooms. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - msg_plain: - type: str - description: - - Plain text form of the message to send to matrix, usually markdown - required: true - msg_html: - type: str - description: - - HTML form of the message to send to matrix - required: true - room_id: - type: str - description: - - ID of the room to send the notification to - required: true - hs_url: - type: str - description: - - URL of the homeserver, where the CS-API is reachable - required: true - token: - type: str - description: - - Authentication token for the API call. If provided, user_id and password are not required - user_id: - type: str - description: - - The user id of the user - password: - type: str - description: - - The password to log in with + msg_plain: + type: str + description: + - Plain text form of the message to send to matrix, usually markdown. 
+ required: true + msg_html: + type: str + description: + - HTML form of the message to send to matrix. + required: true + room_id: + type: str + description: + - ID of the room to send the notification to. + required: true + hs_url: + type: str + description: + - URL of the homeserver, where the CS-API is reachable. + required: true + token: + type: str + description: + - Authentication token for the API call. If provided, O(user_id) and O(password) are not required. + user_id: + type: str + description: + - The user id of the user. + password: + type: str + description: + - The password to log in with. requirements: - - matrix-client (Python library) -''' + - matrix-client (Python library) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send matrix notification with token community.general.matrix: msg_plain: "**hello world**" @@ -76,10 +75,10 @@ EXAMPLES = ''' hs_url: "https://matrix.org" user_id: "ansible_notification_bot" password: "{{ matrix_auth_password }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py index af8ce69600..4b39c64170 100644 --- a/plugins/modules/mattermost.py +++ b/plugins/modules/mattermost.py @@ -15,14 +15,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: mattermost short_description: Send Mattermost notifications description: - - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration. + - Sends notifications to U(http://your.mattermost.url) using the Incoming WebHook integration. author: "Benjamin Jolivot (@bjolivot)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -32,15 +32,13 @@ options: url: type: str description: - - Mattermost url (i.e. 
http://mattermost.yourcompany.com). + - Mattermost url (for example V(http://mattermost.yourcompany.com)). required: true api_key: type: str description: - - Mattermost webhook api key. Log into your mattermost site, go to - Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. - This will give you full URL. O(api_key) is the last part. - http://mattermost.example.com/hooks/C(API_KEY) + - Mattermost webhook api key. Log into your mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. This + will give you full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY). required: true text: type: str @@ -73,17 +71,16 @@ options: type: str description: - Set a priority for the message. - choices: [ important, urgent ] + choices: [important, urgent] version_added: 10.0.0 validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. default: true type: bool -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Mattermost community.general.mattermost: url: http://mattermost.example.com @@ -117,16 +114,16 @@ EXAMPLES = """ short: true """ -RETURN = ''' +RETURN = r""" payload: - description: Mattermost payload - returned: success - type: str + description: Mattermost payload. + returned: success + type: str webhook_url: - description: URL the webhook is sent to - returned: success - type: str -''' + description: URL the webhook is sent to. 
+ returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py index e239b4a164..7193626999 100644 --- a/plugins/modules/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -11,171 +11,165 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: maven_artifact short_description: Downloads an Artifact from a Maven Repository description: - - Downloads an artifact from a maven repository given the maven coordinates provided to the module. - - Can retrieve snapshots or release versions of the artifact and will resolve the latest available - version if one is not available. + - Downloads an artifact from a maven repository given the maven coordinates provided to the module. + - Can retrieve snapshots or release versions of the artifact and will resolve the latest available version if one is not available. author: "Chris Schmidt (@chrisisbeef)" requirements: - - lxml - - boto if using a S3 repository (V(s3://...)) + - lxml + - boto if using a S3 repository (V(s3://...)) attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - group_id: - type: str - description: - - The Maven groupId coordinate. - required: true - artifact_id: - type: str - description: - - The maven artifactId coordinate. - required: true - version: - type: str - description: - - The maven version coordinate. - - Mutually exclusive with O(version_by_spec). - version_by_spec: - type: str - description: - - The maven dependency version ranges. - - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution) - - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported. 
- - Mutually exclusive with O(version). - version_added: '0.2.0' - classifier: - type: str - description: - - The maven classifier coordinate. - default: '' - extension: - type: str - description: - - The maven type/extension coordinate. - default: jar - repository_url: - type: str - description: - - The URL of the Maven Repository to download from. - - Use V(s3://...) if the repository is hosted on Amazon S3. - - Use V(file://...) if the repository is local. - default: https://repo1.maven.org/maven2 - username: - type: str - description: - - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3. - aliases: [ "aws_secret_key" ] - password: - type: str - description: - - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3. - aliases: [ "aws_secret_access_key" ] - headers: - description: - - Add custom HTTP headers to a request in hash/dict format. - type: dict - force_basic_auth: - description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. - default: false - type: bool - version_added: '0.2.0' - dest: - type: path - description: - - The path where the artifact should be written to. - - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file. - required: true - state: - type: str - description: - - The desired state of the artifact. - default: present - choices: [present,absent] - timeout: - type: int - description: - - Specifies a timeout in seconds for the connection attempt. - default: 10 - validate_certs: - description: - - If V(false), SSL certificates will not be validated. 
This should only be set to V(false) when no other option exists. - type: bool - default: true - client_cert: - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - This file can also include the key as well, and if the key is included, O(client_key) is not required. - type: path - version_added: '1.3.0' - client_key: - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - If O(client_cert) contains both the certificate and key, this option is not required. - type: path - version_added: '1.3.0' - keep_name: - description: - - If V(true), the downloaded artifact's name is preserved, i.e the version number remains part of it. - - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) - is defined. - type: bool - default: false - verify_checksum: - type: str - description: - - If V(never), the MD5/SHA1 checksum will never be downloaded and verified. - - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. - - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, - to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) - downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error - if the artifact has not been cached yet, it may fail unexpectedly. - If you still need it, you should consider using V(always) instead - if you deal with a checksum, it is better to - use it to verify integrity after download. - - V(always) combines V(download) and V(change). - required: false - default: 'download' - choices: ['never', 'download', 'change', 'always'] - checksum_alg: - type: str - description: - - If V(md5), checksums will use the MD5 algorithm. This is the default. 
- - If V(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use - FIPS-compliant algorithms, since MD5 will be blocked on such systems. - default: 'md5' - choices: ['md5', 'sha1'] - version_added: 3.2.0 - unredirected_headers: - type: list - elements: str - version_added: 5.2.0 - description: - - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function. - - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]). - - Useful if the redirection URL does not need to have sensitive headers in the request. - - Requires ansible-core version 2.12 or later. - directory_mode: - type: str - description: - - Filesystem permission mode applied recursively to O(dest) when it is a directory. + group_id: + type: str + description: + - The Maven groupId coordinate. + required: true + artifact_id: + type: str + description: + - The maven artifactId coordinate. + required: true + version: + type: str + description: + - The maven version coordinate. + - Mutually exclusive with O(version_by_spec). + version_by_spec: + type: str + description: + - The maven dependency version ranges. + - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution). + - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported. + - Mutually exclusive with O(version). + version_added: '0.2.0' + classifier: + type: str + description: + - The maven classifier coordinate. + default: '' + extension: + type: str + description: + - The maven type/extension coordinate. + default: jar + repository_url: + type: str + description: + - The URL of the Maven Repository to download from. + - Use V(s3://...) if the repository is hosted on Amazon S3. + - Use V(file://...) if the repository is local. 
+ default: https://repo1.maven.org/maven2 + username: + type: str + description: + - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3. + aliases: ["aws_secret_key"] + password: + type: str + description: + - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3. + aliases: ["aws_secret_access_key"] + headers: + description: + - Add custom HTTP headers to a request in hash/dict format. + type: dict + force_basic_auth: + description: + - Httplib2, the library used by the uri module only sends authentication information when a webservice responds to an initial request with + a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This option forces the sending of the Basic + authentication header upon initial request. + default: false + type: bool + version_added: '0.2.0' + dest: + type: path + description: + - The path where the artifact should be written to. + - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file. + required: true + state: + type: str + description: + - The desired state of the artifact. + default: present + choices: [present, absent] + timeout: + type: int + description: + - Specifies a timeout in seconds for the connection attempt. + default: 10 + validate_certs: + description: + - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists. + type: bool + default: true + client_cert: + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - This file can also include the key as well, and if the key is included, O(client_key) is not required. + type: path + version_added: '1.3.0' + client_key: + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. 
+ - If O(client_cert) contains both the certificate and key, this option is not required. + type: path + version_added: '1.3.0' + keep_name: + description: + - If V(true), the downloaded artifact's name is preserved, in other words the version number remains part of it. + - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is defined. + type: bool + default: false + verify_checksum: + type: str + description: + - If V(never), the MD5/SHA1 checksum will never be downloaded and verified. + - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. + - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, to verify if they are identical. + This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, and since some repository + software, when acting as a proxy/cache, return a 404 error if the artifact has not been cached yet, it may fail unexpectedly. If you still + need it, you should consider using V(always) instead - if you deal with a checksum, it is better to use it to verify integrity after download. + - V(always) combines V(download) and V(change). + required: false + default: 'download' + choices: ['never', 'download', 'change', 'always'] + checksum_alg: + type: str + description: + - If V(md5), checksums will use the MD5 algorithm. This is the default. + - If V(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms, since MD5 + will be blocked on such systems. + default: 'md5' + choices: ['md5', 'sha1'] + version_added: 3.2.0 + unredirected_headers: + type: list + elements: str + version_added: 5.2.0 + description: + - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function. 
+ - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]). + - Useful if the redirection URL does not need to have sensitive headers in the request. + - Requires ansible-core version 2.12 or later. + directory_mode: + type: str + description: + - Filesystem permission mode applied recursively to O(dest) when it is a directory. extends_documentation_fragment: - - ansible.builtin.files - - community.general.attributes -''' + - ansible.builtin.files + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Download the latest version of the JUnit framework artifact from Maven Central community.general.maven_artifact: group_id: junit @@ -236,7 +230,7 @@ EXAMPLES = ''' artifact_id: junit version_by_spec: "[3.8,4.0)" dest: /tmp/ -''' +""" import hashlib import os From 6aadcc72d13d37f17dead369395b51c0f8680757 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 27 Dec 2024 01:41:54 +1300 Subject: [PATCH 416/482] [mem ... n]*.py: normalize docs (#9388) * [mem ... n]*.py: normalize docs * Update plugins/modules/netcup_dns.py Co-authored-by: Felix Fontein * netcup_dns: change type of RV(records) From complex to list of dicts. 
--------- Co-authored-by: Felix Fontein --- plugins/modules/memset_dns_reload.py | 56 +- plugins/modules/memset_memstore_info.py | 73 +- plugins/modules/memset_server_info.py | 67 +- plugins/modules/memset_zone.py | 99 +- plugins/modules/memset_zone_domain.py | 81 +- plugins/modules/memset_zone_record.py | 137 +- plugins/modules/mksysb.py | 41 +- plugins/modules/modprobe.py | 91 +- plugins/modules/monit.py | 26 +- plugins/modules/mqtt.py | 66 +- plugins/modules/mssql_db.py | 41 +- plugins/modules/mssql_script.py | 248 ++- plugins/modules/nagios.py | 38 +- plugins/modules/netcup_dns.py | 84 +- plugins/modules/newrelic_deployment.py | 28 +- plugins/modules/nexmo.py | 24 +- plugins/modules/nginx_status_info.py | 17 +- plugins/modules/nictagadm.py | 43 +- plugins/modules/nmcli.py | 2416 +++++++++++------------ plugins/modules/nomad_job.py | 75 +- plugins/modules/nomad_job_info.py | 28 +- plugins/modules/nomad_token.py | 89 +- plugins/modules/nosh.py | 498 +++-- plugins/modules/npm.py | 13 +- plugins/modules/nsupdate.py | 208 +- 25 files changed, 2245 insertions(+), 2342 deletions(-) diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py index 8cff51ade1..100f81fc05 100644 --- a/plugins/modules/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -8,53 +8,47 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_dns_reload author: "Simon Weald (@glitchcrab)" short_description: Request reload of Memset's DNS infrastructure, notes: - - DNS reload requests are a best-effort service provided by Memset; these generally - happen every 15 minutes by default, however you can request an immediate reload if - later tasks rely on the records being created. An API key generated via the - Memset customer control panel is required with the following minimum scope - - C(dns.reload). 
If you wish to poll the job status to wait until the reload has - completed, then C(job.status) is also required. + - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default, however you can request + an immediate reload if later tasks rely on the records being created. An API key generated using the Memset customer control panel is required + with the following minimum scope - C(dns.reload). If you wish to poll the job status to wait until the reload has completed, then C(job.status) + is also required. description: - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - poll: - default: false - type: bool - description: - - Boolean value, if set will poll the reload job's status and return - when the job has completed (unless the 30 second timeout is reached first). - If the timeout is reached then the task will not be marked as failed, but - stderr will indicate that the polling failed. -''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + poll: + default: false + type: bool + description: + - Boolean value, if set will poll the reload job's status and return when the job has completed (unless the 30 second timeout is reached + first). If the timeout is reached then the task will not be marked as failed, but stderr will indicate that the polling failed. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Submit DNS reload and poll community.general.memset_dns_reload: api_key: 5eb86c9196ab03919abcf03857163741 poll: true delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: description: Raw response from the Memset API. returned: always @@ -85,7 +79,7 @@ memset_api: returned: always type: str sample: "dns" -''' +""" from time import sleep diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py index 5dfd1f956a..e9f2699812 100644 --- a/plugins/modules/memset_memstore_info.py +++ b/plugins/modules/memset_memstore_info.py @@ -8,107 +8,104 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_memstore_info author: "Simon Weald (@glitchcrab)" short_description: Retrieve Memstore product usage information notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - C(memstore.usage). + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(memstore.usage). description: - - Retrieve Memstore product usage information. + - Retrieve Memstore product usage information. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The Memstore product name (that is, C(mstestyaa1)). 
-''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The Memstore product name (that is, V(mstestyaa1)). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get usage for mstestyaa1 community.general.memset_memstore_info: name: mstestyaa1 api_key: 5eb86c9896ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: - description: Info from the Memset API + description: Info from the Memset API. returned: always type: complex contains: cdn_bandwidth: - description: Dictionary of CDN bandwidth facts + description: Dictionary of CDN bandwidth facts. returned: always type: complex contains: bytes_out: - description: Outbound CDN bandwidth for the last 24 hours in bytes + description: Outbound CDN bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 requests: - description: Number of requests in the last 24 hours + description: Number of requests in the last 24 hours. returned: always type: int sample: 10 bytes_in: - description: Inbound CDN bandwidth for the last 24 hours in bytes + description: Inbound CDN bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 containers: - description: Number of containers + description: Number of containers. returned: always type: int sample: 10 bytes: - description: Space used in bytes + description: Space used in bytes. returned: always type: int sample: 3860997965 objs: - description: Number of objects + description: Number of objects. returned: always type: int sample: 1000 bandwidth: - description: Dictionary of CDN bandwidth facts + description: Dictionary of CDN bandwidth facts. returned: always type: complex contains: bytes_out: - description: Outbound bandwidth for the last 24 hours in bytes + description: Outbound bandwidth for the last 24 hours in bytes. 
returned: always type: int sample: 1000 requests: - description: Number of requests in the last 24 hours + description: Number of requests in the last 24 hours. returned: always type: int sample: 10 bytes_in: - description: Inbound bandwidth for the last 24 hours in bytes + description: Inbound bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py index 40862ae944..3c0829ce09 100644 --- a/plugins/modules/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -8,48 +8,45 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_server_info author: "Simon Weald (@glitchcrab)" short_description: Retrieve server information notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - C(server.info). + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(server.info). description: - - Retrieve server information. + - Retrieve server information. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. 
- name: - required: true - type: str - description: - - The server product name (that is, C(testyaa1)). -''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The server product name (that is, C(testyaa1)). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get details for testyaa1 community.general.memset_server_info: name: testyaa1 api_key: 5eb86c9896ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: - description: Info from the Memset API + description: Info from the Memset API. returned: always type: complex contains: @@ -59,7 +56,7 @@ memset_api: type: bool sample: true control_panel: - description: Whether the server has a control panel (i.e. cPanel). + description: Whether the server has a control panel (for example cPanel). returned: always type: str sample: 'cpanel' @@ -103,7 +100,7 @@ memset_api: } } firewall_type: - description: The type of firewall the server has (i.e. self-managed, managed). + description: The type of firewall the server has (for example self-managed, managed). returned: always type: str sample: 'managed' @@ -113,7 +110,7 @@ memset_api: type: str sample: 'testyaa1.miniserver.com' ignore_monitoring_off: - description: When true, Memset won't remind the customer that monitoring is disabled. + description: When true, Memset does not remind the customer that monitoring is disabled. returned: always type: bool sample: true @@ -136,7 +133,7 @@ memset_api: type: bool sample: true monitoring_level: - description: The server's monitoring level (i.e. basic). + description: The server's monitoring level (for example V(basic)). returned: always type: str sample: 'basic' @@ -149,7 +146,7 @@ memset_api: description: The network zone(s) the server is in. returned: always type: list - sample: [ 'reading' ] + sample: ['reading'] nickname: description: Customer-set nickname for the server. 
returned: always @@ -196,7 +193,7 @@ memset_api: type: str sample: 'GBP' renewal_price_vat: - description: VAT rate for renewal payments + description: VAT rate for renewal payments. returned: always type: str sample: '20' @@ -206,7 +203,7 @@ memset_api: type: str sample: '2013-04-10' status: - description: Current status of the server (i.e. live, onhold). + description: Current status of the server (for example live, onhold). returned: always type: str sample: 'LIVE' @@ -216,7 +213,7 @@ memset_api: type: str sample: 'managed' type: - description: What this server is (i.e. dedicated) + description: What this server is (for example V(dedicated)). returned: always type: str sample: 'miniserver' @@ -233,7 +230,7 @@ memset_api: returned: always type: str sample: 'basic' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py index e405ad3e86..4d8804e3be 100644 --- a/plugins/modules/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -8,60 +8,55 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone author: "Simon Weald (@glitchcrab)" short_description: Creates and deletes Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point to the same IP). 
An API key + generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). description: - Manage DNS zones in a Memset account. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - required: true - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - name: - required: true - description: - - The zone nickname; usually the same as the main domain. Ensure this - value has at most 250 characters. - type: str - aliases: [ nickname ] - ttl: - description: - - The default TTL for all records created in the zone. This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). - type: int - default: 0 - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - force: - required: false - default: false - type: bool - description: - - Forces deletion of a zone and all zone domains/zone records it contains. -''' + state: + required: true + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + name: + required: true + description: + - The zone nickname; usually the same as the main domain. Ensure this value has at most 250 characters. + type: str + aliases: [nickname] + ttl: + description: + - The default TTL for all records created in the zone. This must be a valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). 
+ type: int + default: 0 + choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400] + force: + required: false + default: false + type: bool + description: + - Forces deletion of a zone and all zone domains/zone records it contains. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create the zone 'test' - name: Create zone community.general.memset_zone: @@ -79,40 +74,40 @@ EXAMPLES = ''' api_key: 5eb86c9196ab03919abcf03857163741 force: true delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" memset_api: - description: Zone info from the Memset API + description: Zone info from the Memset API. returned: when state == present type: complex contains: domains: - description: List of domains in this zone + description: List of domains in this zone. returned: always type: list sample: [] id: - description: Zone id + description: Zone id. returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" nickname: - description: Zone name + description: Zone name. returned: always type: str sample: "example.com" records: - description: List of DNS records for domains in this zone + description: List of DNS records for domains in this zone. returned: always type: list sample: [] ttl: - description: Default TTL for domains in this zone + description: Default TTL for domains in this zone. 
returned: always type: int sample: 300 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import check_zone diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py index 7443e6c256..ca4b07aaaf 100644 --- a/plugins/modules/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -8,53 +8,50 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone_domain author: "Simon Weald (@glitchcrab)" short_description: Create and delete domains in Memset DNS zones notes: - - Zone domains can be thought of as a collection of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list). - - Currently this module can only create one domain at a time. Multiple domains should - be created using C(loop). + - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they point to the same IP). An API + key generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), + C(dns.zone_domain_list). + - Currently this module can only create one domain at a time. Multiple domains should be created using C(loop). description: - Manage DNS zone domains in a Memset account. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - default: present - description: - - Indicates desired state of resource. 
- type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - domain: - required: true - description: - - The zone domain name. Ensure this value has at most 250 characters. - type: str - aliases: ['name'] - zone: - required: true - description: - - The zone to add the domain to (this must already exist). - type: str -''' + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + domain: + required: true + description: + - The zone domain name. Ensure this value has at most 250 characters. + type: str + aliases: ['name'] + zone: + required: true + description: + - The zone to add the domain to (this must already exist). + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create the zone domain 'test.com' - name: Create zone domain community.general.memset_zone_domain: @@ -63,25 +60,25 @@ EXAMPLES = ''' state: present api_key: 5eb86c9196ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" memset_api: - description: Domain info from the Memset API + description: Domain info from the Memset API. returned: when changed or state == present type: complex contains: domain: - description: Domain name + description: Domain name. returned: always type: str sample: "example.com" id: - description: Domain ID + description: Domain ID. 
returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 349240b84e..553cd66926 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -8,83 +8,78 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone_record author: "Simon Weald (@glitchcrab)" short_description: Create and delete records in Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). - - Currently this module can only create one DNS record at a time. Multiple records - should be created using C(loop). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point to the same IP). An API key + generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). + - Currently this module can only create one DNS record at a time. Multiple records should be created using C(loop). description: - Manage DNS records in a Memset account. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - default: present - description: - - Indicates desired state of resource. 
- type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - address: - required: true - description: - - The address for this record (can be IP or text string depending on record type). - type: str - aliases: [ ip, data ] - priority: - description: - - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). - type: int - default: 0 - record: - required: false - description: - - The subdomain to create. - type: str - default: '' - type: - required: true - description: - - The type of DNS record to create. - choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ] - type: str - relative: - type: bool - default: false - description: - - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) - and C(SRV)record types. - ttl: - description: - - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). - default: 0 - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - type: int - zone: - required: true - description: - - The name of the zone to which to add the record to. - type: str -''' + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + address: + required: true + description: + - The address for this record (can be IP or text string depending on record type). + type: str + aliases: [ip, data] + priority: + description: + - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). + type: int + default: 0 + record: + required: false + description: + - The subdomain to create. + type: str + default: '' + type: + required: true + description: + - The type of DNS record to create. 
+ choices: [A, AAAA, CNAME, MX, NS, SRV, TXT] + type: str + relative: + type: bool + default: false + description: + - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) and C(SRV)record types. + ttl: + description: + - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a valid int from + U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). + default: 0 + choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400] + type: int + zone: + required: true + description: + - The name of the zone to which to add the record to. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create DNS record for www.domain.com - name: Create DNS record community.general.memset_zone_record: @@ -118,11 +113,11 @@ EXAMPLES = ''' address: "{{ item.address }}" delegate_to: localhost with_items: - - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' } - - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' } -''' + - {'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4'} + - {'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1'} +""" -RETURN = ''' +RETURN = r""" memset_api: description: Record info from the Memset API. 
returned: when state == present @@ -168,7 +163,7 @@ memset_api: returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index d1f49ca82e..d3c9abeac0 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -10,20 +10,19 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" author: Kairo Araujo (@kairoaraujo) module: mksysb short_description: Generates AIX mksysb rootvg backups description: -- This module manages a basic AIX mksysb (image) of rootvg. + - This module manages a basic AIX mksysb (image) of rootvg. seealso: -- name: C(mksysb) command manual page - description: Manual page for the command. - link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command + - name: C(mksysb) command manual page + description: Manual page for the command. + link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command extends_documentation_fragment: -- community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -32,63 +31,62 @@ attributes: options: backup_crypt_files: description: - - Backup encrypted files. + - Backup encrypted files. type: bool default: true backup_dmapi_fs: description: - - Back up DMAPI filesystem files. + - Back up DMAPI filesystem files. type: bool default: true create_map_files: description: - - Creates a new MAP files. + - Creates a new MAP files. type: bool default: false exclude_files: description: - - Excludes files using C(/etc/rootvg.exclude). + - Excludes files using C(/etc/rootvg.exclude). type: bool default: false exclude_wpar_files: description: - - Excludes WPAR files. + - Excludes WPAR files. 
type: bool default: false extended_attrs: description: - - Backup extended attributes. + - Backup extended attributes. type: bool default: true name: type: str description: - - Backup name + - Backup name. required: true new_image_data: description: - - Creates a new file data. + - Creates a new file data. type: bool default: true software_packing: description: - - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). + - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). type: bool default: false storage_path: type: str description: - - Storage path where the mksysb will stored. + - Storage path where the mksysb will stored. required: true use_snapshot: description: - - Creates backup using snapshots. + - Creates backup using snapshots. type: bool default: false """ -EXAMPLES = """ ---- +EXAMPLES = r""" - name: Running a backup image mksysb community.general.mksysb: name: myserver @@ -97,8 +95,7 @@ EXAMPLES = """ exclude_wpar_files: true """ -RETURN = """ ---- +RETURN = r""" changed: description: Return changed for mksysb actions as true or false. returned: always diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index 3d6a7c2410..b7b93cce2b 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -8,59 +8,58 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: modprobe short_description: Load or unload kernel modules author: - - David Stygstra (@stygstra) - - Julien Dauphant (@jdauphant) - - Matt Jeffery (@mattjeffery) + - David Stygstra (@stygstra) + - Julien Dauphant (@jdauphant) + - Matt Jeffery (@mattjeffery) description: - - Load or unload kernel modules. + - Load or unload kernel modules. 
extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - required: true - description: - - Name of kernel module to manage. - state: - type: str - description: - - Whether the module should be present or absent. - choices: [ absent, present ] - default: present - params: - type: str - description: - - Modules parameters. - default: '' - persistent: - type: str - choices: [ disabled, absent, present ] - default: disabled - version_added: 7.0.0 - description: - - Persistency between reboots for configured module. - - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. - - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot. - - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be - loaded on next reboot. - - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. - - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the - kernel modules themselves instead of configuration like this. - - In fact, most modern kernel modules are prepared for automatic loading already. - - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled)." -''' + name: + type: str + required: true + description: + - Name of kernel module to manage. + state: + type: str + description: + - Whether the module should be present or absent. + choices: [absent, present] + default: present + params: + type: str + description: + - Modules parameters. 
+ default: '' + persistent: + type: str + choices: [disabled, absent, present] + default: disabled + version_added: 7.0.0 + description: + - Persistency between reboots for configured module. + - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. + - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot. + - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module + will not be loaded on next reboot. + - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. + - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded + in the kernel modules themselves instead of configuration like this. + - In fact, most modern kernel modules are prepared for automatic loading already. + - B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add the 802.1q module community.general.modprobe: name: 8021q @@ -78,7 +77,7 @@ EXAMPLES = ''' state: present params: 'numdummies=2' persistent: present -''' +""" import os.path import platform diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py index 5475ab1e52..f97cd12e64 100644 --- a/plugins/modules/monit.py +++ b/plugins/modules/monit.py @@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: monit -short_description: Manage the state of a program monitored via Monit +short_description: Manage the state of a program monitored using Monit description: - - Manage the state of a program monitored via Monit. 
+ - Manage the state of a program monitored using Monit. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -32,26 +31,25 @@ options: description: - The state of service. required: true - choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] + choices: ["present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded"] type: str timeout: description: - - If there are pending actions for the service monitored by monit, then Ansible will check - for up to this many seconds to verify the requested action has been performed. - Ansible will sleep for five seconds between each check. + - If there are pending actions for the service monitored by monit, then Ansible will check for up to this many seconds to verify the requested + action has been performed. Ansible will sleep for five seconds between each check. default: 300 type: int author: - - Darryl Stoflet (@dstoflet) - - Simon Kelly (@snopoke) -''' + - Darryl Stoflet (@dstoflet) + - Simon Kelly (@snopoke) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Manage the state of program httpd to be in started state community.general.monit: name: httpd state: started -''' +""" import time import re diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py index f8d64e6a00..462f809ade 100644 --- a/plugins/modules/mqtt.py +++ b/plugins/modules/mqtt.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: mqtt short_description: Publish a message on an MQTT topic for the IoT description: @@ -26,12 +25,12 @@ options: server: type: str description: - - MQTT broker address/name + - MQTT broker address/name. default: localhost port: type: int description: - - MQTT broker port number + - MQTT broker port number. 
default: 1883 username: type: str @@ -44,76 +43,67 @@ options: client_id: type: str description: - - MQTT client identifier + - MQTT client identifier. - If not specified, a value C(hostname + pid) will be used. topic: type: str description: - - MQTT topic name + - MQTT topic name. required: true payload: type: str description: - - Payload. The special string V("None") may be used to send a NULL - (that is, empty) payload which is useful to simply notify with the O(topic) + - Payload. The special string V("None") may be used to send a NULL (that is, empty) payload which is useful to simply notify with the O(topic) or to clear previously retained messages. required: true qos: type: str description: - - QoS (Quality of Service) + - QoS (Quality of Service). default: "0" - choices: [ "0", "1", "2" ] + choices: ["0", "1", "2"] retain: description: - - Setting this flag causes the broker to retain (i.e. keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. + - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently subscribe to the topic can + received the last retained message immediately. type: bool default: false ca_cert: type: path description: - - The path to the Certificate Authority certificate files that are to be - treated as trusted by this client. If this is the only option given - then the client will operate in a similar manner to a web browser. That - is to say it will require the broker to have a certificate signed by the - Certificate Authorities in ca_certs and will communicate using TLS v1, - but will not attempt any form of authentication. This provides basic - network encryption but may not be sufficient depending on how the broker - is configured. - aliases: [ ca_certs ] + - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. 
If this is the only option given + then the client will operate in a similar manner to a web browser. That is to say it will require the broker to have a certificate signed + by the Certificate Authorities in ca_certs and will communicate using TLS v1, but will not attempt any form of authentication. This provides + basic network encryption but may not be sufficient depending on how the broker is configured. + aliases: [ca_certs] client_cert: type: path description: - - The path pointing to the PEM encoded client certificate. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. - aliases: [ certfile ] + - The path pointing to the PEM encoded client certificate. If this is not None it will be used as client information for TLS based authentication. + Support for this feature is broker dependent. + aliases: [certfile] client_key: type: path description: - - The path pointing to the PEM encoded client private key. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. - aliases: [ keyfile ] + - The path pointing to the PEM encoded client private key. If this is not None it will be used as client information for TLS based authentication. + Support for this feature is broker dependent. + aliases: [keyfile] tls_version: description: - Specifies the version of the SSL/TLS protocol to be used. - - By default (if the python version supports it) the highest TLS version is - detected. If unavailable, TLS v1 is used. + - By default (if the python version supports it) the highest TLS version is detected. If unavailable, TLS v1 is used. type: str choices: - tlsv1.1 - tlsv1.2 -requirements: [ mosquitto ] +requirements: [mosquitto] notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). 
+ - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) + Python client (U(https://pypi.org/project/paho-mqtt/)). author: "Jan-Piet Mens (@jpmens)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Publish a message on an MQTT topic community.general.mqtt: topic: 'service/ansible/{{ ansible_hostname }}' @@ -122,7 +112,7 @@ EXAMPLES = ''' retain: false client_id: ans001 delegate_to: localhost -''' +""" # =========================================== # MQTT module support methods. diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py index a85f721fca..95f529aff3 100644 --- a/plugins/modules/mssql_db.py +++ b/plugins/modules/mssql_db.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: mssql_db short_description: Add or remove MSSQL databases from a remote host description: @@ -26,56 +25,54 @@ attributes: options: name: description: - - name of the database to add or remove + - Name of the database to add or remove. required: true - aliases: [ db ] + aliases: [db] type: str login_user: description: - - The username used to authenticate with + - The username used to authenticate with. type: str default: '' login_password: description: - - The password used to authenticate with + - The password used to authenticate with. type: str default: '' login_host: description: - - Host running the database + - Host running the database. type: str required: true login_port: description: - - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used + - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used. default: '1433' type: str state: description: - - The database state + - The database state. 
default: present - choices: [ "present", "absent", "import" ] + choices: ["present", "absent", "import"] type: str target: description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) files are supported. + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) files are supported. type: str autocommit: description: - - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed - within a transaction. + - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can not + be changed within a transaction. type: bool default: false notes: - - Requires the pymssql Python package on the remote host. For Ubuntu, this - is as easy as pip install pymssql (See M(ansible.builtin.pip).) + - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as pip install pymssql (See M(ansible.builtin.pip)). requirements: - - pymssql + - pymssql author: Vedit Firat Arig (@vedit) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new database with name 'jackdata' community.general.mssql_db: name: jackdata @@ -92,11 +89,11 @@ EXAMPLES = ''' name: my_db state: import target: /tmp/dump.sql -''' +""" -RETURN = ''' +RETURN = r""" # -''' +""" import os import traceback diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py index b1713092c8..045cafde88 100644 --- a/plugins/modules/mssql_script.py +++ b/plugins/modules/mssql_script.py @@ -7,8 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: mssql_script short_description: Execute SQL scripts on a MSSQL database @@ -17,77 +16,74 @@ version_added: "4.0.0" description: - Execute SQL scripts on a MSSQL database. 
- extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: partial - details: - - The script will not be executed in check mode. - diff_mode: - support: none + check_mode: + support: partial + details: + - The script will not be executed in check mode. + diff_mode: + support: none options: - name: - description: Database to run script against. - aliases: [ db ] - default: '' - type: str - login_user: - description: The username used to authenticate with. - type: str - login_password: - description: The password used to authenticate with. - type: str - login_host: - description: Host running the database. - type: str - required: true - login_port: - description: Port of the MSSQL server. Requires O(login_host) be defined as well. - default: 1433 - type: int - script: - description: - - The SQL script to be executed. - - Script can contain multiple SQL statements. Multiple Batches can be separated by V(GO) command. - - Each batch must return at least one result set. - required: true - type: str - transaction: - description: - - If transactional mode is requested, start a transaction and commit the change only if the script succeed. - Otherwise, rollback the transaction. - - If transactional mode is not requested (default), automatically commit the change. - type: bool - default: false - version_added: 8.4.0 - output: - description: - - With V(default) each row will be returned as a list of values. See RV(query_results). - - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict). - - V(dict) requires named columns to be returned by each query otherwise an error is thrown. - choices: [ "dict", "default" ] - default: 'default' - type: str - params: - description: | - Parameters passed to the script as SQL parameters. - (Query V('SELECT %(name\)s"') with V(example: '{"name": "John Doe"}).)' - type: dict + name: + description: Database to run script against. 
+ aliases: [db] + default: '' + type: str + login_user: + description: The username used to authenticate with. + type: str + login_password: + description: The password used to authenticate with. + type: str + login_host: + description: Host running the database. + type: str + required: true + login_port: + description: Port of the MSSQL server. Requires O(login_host) be defined as well. + default: 1433 + type: int + script: + description: + - The SQL script to be executed. + - Script can contain multiple SQL statements. Multiple Batches can be separated by V(GO) command. + - Each batch must return at least one result set. + required: true + type: str + transaction: + description: + - If transactional mode is requested, start a transaction and commit the change only if the script succeed. Otherwise, rollback the transaction. + - If transactional mode is not requested (default), automatically commit the change. + type: bool + default: false + version_added: 8.4.0 + output: + description: + - With V(default) each row will be returned as a list of values. See RV(query_results). + - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict). + - V(dict) requires named columns to be returned by each query otherwise an error is thrown. + choices: ["dict", "default"] + default: 'default' + type: str + params: + description: |- + Parameters passed to the script as SQL parameters. + (Query V('SELECT %(name\)s"') with V(example: '{"name": "John Doe"}).)'. + type: dict notes: - - Requires the pymssql Python package on the remote host. For Ubuntu, this - is as easy as C(pip install pymssql) (See M(ansible.builtin.pip).) + - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as C(pip install pymssql) (See M(ansible.builtin.pip)). 
requirements: - - pymssql + - pymssql author: - - Kris Budde (@kbudde) -''' + - Kris Budde (@kbudde) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Check DB connection community.general.mssql_script: login_user: "{{ mssql_login_user }}" @@ -140,11 +136,11 @@ EXAMPLES = r''' register: result_batches - assert: that: - - result_batches.query_results | length == 2 # two batch results - - result_batches.query_results[0] | length == 2 # two selects in first batch - - result_batches.query_results[0][0] | length == 1 # one row in first select - - result_batches.query_results[0][0][0] | length == 1 # one column in first row - - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values. + - result_batches.query_results | length == 2 # two batch results + - result_batches.query_results[0] | length == 2 # two selects in first batch + - result_batches.query_results[0][0] | length == 1 # one row in first select + - result_batches.query_results[0][0][0] | length == 1 # one column in first row + - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values. 
- name: two batches with dict output community.general.mssql_script: @@ -161,68 +157,68 @@ EXAMPLES = r''' register: result_batches_dict - assert: that: - - result_batches_dict.query_results_dict | length == 2 # two batch results - - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch - - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select - - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row -''' + - result_batches_dict.query_results_dict | length == 2 # two batch results + - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch + - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select + - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row +""" -RETURN = r''' +RETURN = r""" query_results: - description: List of batches (queries separated by V(GO) keyword). - type: list - elements: list - returned: success and O(output=default) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - - Use the V(GO) keyword in O(script) to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_value: - description: - - List of column values. - - Any non-standard JSON type is converted to string. - type: list - example: ["Batch 0 - Select 0"] - returned: success, if output is default + description: List of batches (queries separated by V(GO) keyword). 
+ type: list + elements: list + returned: success and O(output=default) + sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + contains: + queries: + description: + - List of result sets of each query. + - If a query returns no results, the results of this and all the following queries will not be included in the output. + - Use the V(GO) keyword in O(script) to separate queries. + type: list + elements: list + contains: + rows: + description: List of rows returned by query. + type: list + elements: list + contains: + column_value: + description: + - List of column values. + - Any non-standard JSON type is converted to string. + type: list + example: ["Batch 0 - Select 0"] + returned: success, if output is default query_results_dict: - description: List of batches (queries separated by V(GO) keyword). - type: list - elements: list - returned: success and O(output=dict) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - Use 'GO' keyword to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_dict: - description: - - Dictionary of column names and values. - - Any non-standard JSON type is converted to string. - type: dict - example: {"col_name": "Batch 0 - Select 0"} - returned: success, if output is dict -''' + description: List of batches (queries separated by V(GO) keyword). + type: list + elements: list + returned: success and O(output=dict) + sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + contains: + queries: + description: + - List of result sets of each query. 
+ - If a query returns no results, the results of this and all the following queries will not be included in the output. Use 'GO' keyword + to separate queries. + type: list + elements: list + contains: + rows: + description: List of rows returned by query. + type: list + elements: list + contains: + column_dict: + description: + - Dictionary of column names and values. + - Any non-standard JSON type is converted to string. + type: dict + example: {"col_name": "Batch 0 - Select 0"} + returned: success, if output is dict +""" from ansible.module_utils.basic import AnsibleModule, missing_required_lib import traceback diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py index 0f1f0b7c50..7a0c26b48e 100644 --- a/plugins/modules/nagios.py +++ b/plugins/modules/nagios.py @@ -14,20 +14,19 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: nagios short_description: Perform common tasks in Nagios related to downtime and notifications description: - - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." + - 'The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts.' - The C(nagios) module is not idempotent. - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on. - - You can specify multiple services at once by separating them with commas, .e.g. O(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge for the I(host itself), - for example O(services=host). This keyword may not be given with other services at the same time. 
- B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) - To schedule downtime for all services on particular host use keyword "all", for example O(services=all). + - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet). + - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge for the I(host + itself), for example O(services=host). This keyword may not be given with other services at the same time. B(Setting alerts/downtime/acknowledge + for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) To schedule downtime for all services on particular + host use keyword "all", for example O(services=all). extends_documentation_fragment: - community.general.attributes attributes: @@ -41,9 +40,8 @@ options: - Action to take. - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. required: true - choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", - "servicegroup_host_downtime", "acknowledge", "forced_check" ] + choices: ["downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", "silence_nagios", "unsilence_nagios", + "command", "servicegroup_service_downtime", "servicegroup_host_downtime", "acknowledge", "forced_check"] type: str host: description: @@ -51,18 +49,16 @@ options: type: str cmdfile: description: - - Path to the nagios I(command file) (FIFO pipe). - Only required if auto-detection fails. + - Path to the nagios I(command file) (FIFO pipe). Only required if auto-detection fails. type: str author: description: - - Author to leave downtime comments as. - Only used when O(action) is V(downtime) or V(acknowledge). 
+ - Author to leave downtime comments as. Only used when O(action) is V(downtime) or V(acknowledge). type: str default: Ansible comment: description: - - Comment when O(action) is V(downtime) or V(acknowledge). + - Comment when O(action) is V(downtime) or V(acknowledge). type: str default: Scheduling downtime start: @@ -79,8 +75,8 @@ options: services: description: - What to manage downtime/alerts for. Separate multiple services with commas. - - "B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts)." - aliases: [ "service" ] + - 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).' + aliases: ["service"] type: str servicegroup: description: @@ -94,9 +90,9 @@ options: type: str author: "Tim Bielawa (@tbielawa)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set 30 minutes of apache downtime community.general.nagios: action: downtime @@ -245,7 +241,7 @@ EXAMPLES = ''' community.general.nagios: action: command command: DISABLE_FAILURE_PREDICTION -''' +""" import time import os.path diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py index cba70c0fa3..370aaa5dca 100644 --- a/plugins/modules/netcup_dns.py +++ b/plugins/modules/netcup_dns.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: netcup_dns notes: [] short_description: Manage Netcup DNS records description: - - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)." + - Manages DNS records using the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php). 
extends_documentation_fragment: - community.general.attributes attributes: @@ -26,12 +25,12 @@ attributes: options: api_key: description: - - "API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))." + - API key for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). required: true type: str api_password: description: - - "API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))." + - API password for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). required: true type: str customer_id: @@ -48,7 +47,7 @@ options: description: - Record to add or delete, supports wildcard (V(*)). Default is V(@) (that is, the zone name). default: "@" - aliases: [ name ] + aliases: [name] type: str type: description: @@ -80,7 +79,7 @@ options: - Whether the record should exist or not. required: false default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str timeout: description: @@ -91,10 +90,9 @@ options: requirements: - "nc-dnsapi >= 0.1.3" author: "Nicolai Buchwitz (@nbuchwitz)" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a record of type A community.general.netcup_dns: api_key: "..." 
@@ -156,41 +154,41 @@ EXAMPLES = ''' type: "A" value: "127.0.0.1" timeout: 30 +""" -''' - -RETURN = ''' +RETURN = r""" records: - description: list containing all records - returned: success - type: complex - contains: - name: - description: the record name - returned: success - type: str - sample: fancy-hostname - type: - description: the record type - returned: success - type: str - sample: A - value: - description: the record destination - returned: success - type: str - sample: 127.0.0.1 - priority: - description: the record priority (only relevant if type=MX) - returned: success - type: int - sample: 0 - id: - description: internal id of the record - returned: success - type: int - sample: 12345 -''' + description: List containing all records. + returned: success + type: list + elements: dict + contains: + name: + description: The record name. + returned: success + type: str + sample: fancy-hostname + type: + description: The record type. + returned: success + type: str + sample: A + value: + description: The record destination. + returned: success + type: str + sample: 127.0.0.1 + priority: + description: The record priority (only relevant if RV(records[].type=MX)). + returned: success + type: int + sample: 0 + id: + description: Internal id of the record. 
+ returned: success + type: int + sample: 12345 +""" import traceback diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py index e5a1160822..99ff996670 100644 --- a/plugins/modules/newrelic_deployment.py +++ b/plugins/modules/newrelic_deployment.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: newrelic_deployment author: "Matt Coddington (@mcodd)" short_description: Notify New Relic about app deployments description: - - Notify New Relic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/) + - Notify New Relic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)). extends_documentation_fragment: - community.general.attributes attributes: @@ -44,49 +43,48 @@ options: changelog: type: str description: - - A list of changes for this deployment + - A list of changes for this deployment. required: false description: type: str description: - - Text annotation for the deployment - notes for you + - Text annotation for the deployment - notes for you. required: false revision: type: str description: - - A revision number (e.g., git commit SHA) + - A revision number (for example, git commit SHA). required: true user: type: str description: - - The name of the user/process that triggered this deployment + - The name of the user/process that triggered this deployment. required: false validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. 
required: false default: true type: bool app_name_exact_match: type: bool description: - - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. - If set to V(false) it returns the first result. + - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. If set to V(false) it returns + the first result. required: false default: false version_added: 7.5.0 requirements: [] -''' +""" -EXAMPLES = ''' -- name: Notify New Relic about an app deployment +EXAMPLES = r""" +- name: Notify New Relic about an app deployment community.general.newrelic_deployment: token: AAAAAA app_name: myapp user: ansible deployment revision: '1.0' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py index 39f127f98c..3293362ec3 100644 --- a/plugins/modules/nexmo.py +++ b/plugins/modules/nexmo.py @@ -9,11 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: nexmo -short_description: Send a SMS via nexmo +short_description: Send a SMS using nexmo description: - - Send a SMS message via nexmo + - Send a SMS message using nexmo. author: "Matt Martz (@sivel)" attributes: check_mode: @@ -24,42 +24,40 @@ options: api_key: type: str description: - - Nexmo API Key + - Nexmo API Key. required: true api_secret: type: str description: - - Nexmo API Secret + - Nexmo API Secret. required: true src: type: int description: - - Nexmo Number to send from + - Nexmo Number to send from. required: true dest: type: list elements: int description: - - Phone number(s) to send SMS message to + - Phone number(s) to send SMS message to. required: true msg: type: str description: - - Message to text to send. 
Messages longer than 160 characters will be - split into multiple messages + - Message to text to send. Messages longer than 160 characters will be split into multiple messages. required: true validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. type: bool default: true extends_documentation_fragment: - ansible.builtin.url - community.general.attributes -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Nexmo community.general.nexmo: api_key: 640c8a53 diff --git a/plugins/modules/nginx_status_info.py b/plugins/modules/nginx_status_info.py index 6bbea078b0..7fa681d6d8 100644 --- a/plugins/modules/nginx_status_info.py +++ b/plugins/modules/nginx_status_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: nginx_status_info short_description: Retrieve information on nginx status description: @@ -34,9 +33,9 @@ options: notes: - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Gather status info from nginx on localhost - name: Get current http stats community.general.nginx_status_info: @@ -49,10 +48,9 @@ EXAMPLES = r''' url: http://localhost/nginx_status timeout: 20 register: result -''' +""" -RETURN = r''' ---- +RETURN = r""" active_connections: description: Active connections. returned: success @@ -64,7 +62,8 @@ accepts: type: int sample: 81769947 handled: - description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + description: The total number of handled connections. 
Generally, the parameter value is the same as accepts unless some resource limits have + been reached. returned: success type: int sample: 81769947 @@ -93,7 +92,7 @@ data: returned: success type: str sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py index 5b81861e8f..a02a8fcffd 100644 --- a/plugins/modules/nictagadm.py +++ b/plugins/modules/nictagadm.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: nictagadm short_description: Manage nic tags on SmartOS systems description: @@ -26,39 +25,39 @@ attributes: options: name: description: - - Name of the nic tag. + - Name of the nic tag. required: true type: str mac: description: - - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub). - - Parameters O(mac) and O(etherstub) are mutually exclusive. + - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub). + - Parameters O(mac) and O(etherstub) are mutually exclusive. type: str etherstub: description: - - Specifies that the nic tag will be attached to a created O(etherstub). - - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac). + - Specifies that the nic tag will be attached to a created O(etherstub). + - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac). type: bool default: false mtu: description: - - Specifies the size of the O(mtu) of the desired nic tag. - - Parameters O(mtu) and O(etherstub) are mutually exclusive. + - Specifies the size of the O(mtu) of the desired nic tag. + - Parameters O(mtu) and O(etherstub) are mutually exclusive. 
type: int force: description: - - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. + - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. type: bool default: false state: description: - - Create or delete a SmartOS nic tag. + - Create or delete a SmartOS nic tag. type: str - choices: [ absent, present ] + choices: [absent, present] default: present -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create 'storage0' on '00:1b:21:a3:f5:4d' community.general.nictagadm: name: storage0 @@ -70,11 +69,11 @@ EXAMPLES = r''' community.general.nictagadm: name: storage0 state: absent -''' +""" -RETURN = r''' +RETURN = r""" name: - description: nic tag name + description: Nic tag name. returned: always type: str sample: storage0 @@ -84,26 +83,26 @@ mac: type: str sample: 00:1b:21:a3:f5:4d etherstub: - description: specifies if the nic tag will create and attach to an etherstub. + description: Specifies if the nic tag will create and attach to an etherstub. returned: always type: bool sample: false mtu: - description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. + description: Specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. returned: always type: int sample: 1500 force: - description: Shows if -f was used during the deletion of a nic tag + description: Shows if -f was used during the deletion of a nic tag. returned: always type: bool sample: false state: - description: state of the target + description: State of the target. 
returned: always type: str sample: present -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 4ea6799577..a8784e870c 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -9,1090 +9,1067 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: nmcli author: - - Chris Long (@alcamie101) + - Chris Long (@alcamie101) short_description: Manage Networking requirements: - - nmcli + - nmcli extends_documentation_fragment: - - community.general.attributes + - community.general.attributes description: - - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.' - - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' - - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' - - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager' - - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' + - Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team), V(bond), V(vlan) and so on. + - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' + - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' + - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager.' 
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - state: + state: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + - Using O(state=present) to create connection will automatically bring connection up. + - Using O(state=up) and O(state=down) will not modify connection with other parameters. These states have been added in community.general + 9.5.0. + type: str + required: true + choices: [absent, present, up, down] + autoconnect: + description: + - Whether the connection should start on boot. + - Whether the connection profile can be automatically activated. + type: bool + default: true + conn_name: + description: + - The name used to call the connection. Pattern is V([-][-]). + type: str + required: true + conn_reload: + description: + - Whether the connection should be reloaded if it was modified. + type: bool + required: false + default: false + version_added: 9.5.0 + ifname: + description: + - The interface to bind the connection to. + - The connection will only be applicable to this interface name. + - A special value of V(*) can be used for interface-independent connections. + - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. + - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it. + type: str + type: + description: + - This is the type of device or network connection that you wish to create or modify. + - Type V(dummy) is added in community.general 3.5.0. + - Type V(gsm) is added in community.general 3.7.0. + - Type V(infiniband) is added in community.general 2.0.0. + - Type V(loopback) is added in community.general 8.1.0. + - Type V(macvlan) is added in community.general 6.6.0. 
+ - Type V(ovs-bridge) is added in community.general 8.6.0. + - Type V(ovs-interface) is added in community.general 8.6.0. + - Type V(ovs-port) is added in community.general 8.6.0. + - Type V(wireguard) is added in community.general 4.3.0. + - Type V(vpn) is added in community.general 5.1.0. + - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option. + - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) option. + type: str + choices: [bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan, + wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, loopback] + mode: + description: + - This is the type of device or network connection that you wish to create for a bond or bridge. + type: str + choices: [802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast] + default: balance-rr + transport_mode: + description: + - This option sets the connection type of Infiniband IPoIB devices. + type: str + choices: [datagram, connected] + version_added: 5.8.0 + slave_type: + description: + - Type of the device of this slave's master connection (for example V(bond)). + - Type V(ovs-port) is added in community.general 8.6.0. + type: str + choices: ['bond', 'bridge', 'team', 'ovs-port'] + version_added: 7.0.0 + master: + description: + - Master [-][-]. + - The Type Of Service. + type: int + route_metric4: + description: + - Set metric level of ipv4 routes configured on interface. + type: int + version_added: 2.0.0 + routing_rules4: + description: + - Is the same as in an C(ip rule add) command, except always requires specifying a priority. + type: list + elements: str + version_added: 3.3.0 + never_default4: + description: + - Set as default route. + - This parameter is mutually_exclusive with gw4 parameter. 
+ type: bool + default: false + version_added: 2.0.0 + dns4: + description: + - A list of up to 3 DNS servers. + - The entries must be IPv4 addresses, for example V(192.0.2.53). + elements: str + type: list + dns4_search: + description: + - A list of DNS search domains. + elements: str + type: list + dns4_options: + description: + - A list of DNS options. + elements: str + type: list + version_added: 7.2.0 + dns4_ignore_auto: + description: + - Ignore automatically configured IPv4 name servers. + type: bool + default: false + version_added: 3.2.0 + method4: + description: + - Configuration method to be used for IPv4. + - If O(ip4) is set, C(ipv4.method) is automatically set to V(manual) and this parameter is not needed. + type: str + choices: [auto, link-local, manual, shared, disabled] + version_added: 2.2.0 + may_fail4: + description: + - If you need O(ip4) configured before C(network-online.target) is reached, set this option to V(false). + - This option applies when O(method4) is not V(disabled). + type: bool + default: true + version_added: 3.3.0 + ip6: + description: + - List of IPv6 addresses to this interface. + - Use the format V(abbe::cafe/128) or V(abbe::cafe). + - If defined and O(method6) is not specified, automatically set C(ipv6.method) to V(manual). + type: list + elements: str + gw6: + description: + - The IPv6 gateway for this interface. + - Use the format V(2001:db8::1). + type: str + gw6_ignore_auto: + description: + - Ignore automatically configured IPv6 routes. + type: bool + default: false + version_added: 3.2.0 + routes6: + description: + - The list of IPv6 routes. + - Use the format V(fd12:3456:789a:1::/64 2001:dead:beef::1). + - To specify more complex routes, use the O(routes6_extended) option. + type: list + elements: str + version_added: 4.4.0 + routes6_extended: + description: + - The list of IPv6 routes but with parameters. + type: list + elements: dict + suboptions: + ip: + description: + - IP or prefix of route. 
+ - Use the format V(fd12:3456:789a:1::/64). type: str required: true - conn_reload: + next_hop: description: - - Whether the connection should be reloaded if it was modified. + - Use the format V(2001:dead:beef::1). + type: str + metric: + description: + - Route metric. + type: int + table: + description: + - The table to add this route to. + - The default depends on C(ipv6.route-table). + type: int + cwnd: + description: + - The clamp for congestion window. + type: int + mtu: + description: + - If non-zero, only transmit packets of the specified size or smaller. + type: int + onlink: + description: + - Pretend that the nexthop is directly attached to this link, even if it does not match any interface prefix. type: bool - required: false - default: false - version_added: 9.5.0 - ifname: + route_metric6: + description: + - Set metric level of IPv6 routes configured on interface. + type: int + version_added: 4.4.0 + dns6: + description: + - A list of up to 3 DNS servers. + - The entries must be IPv6 addresses, for example V(2001:4860:4860::8888). + elements: str + type: list + dns6_search: + description: + - A list of DNS search domains. + elements: str + type: list + dns6_options: + description: + - A list of DNS options. + elements: str + type: list + version_added: 7.2.0 + dns6_ignore_auto: + description: + - Ignore automatically configured IPv6 name servers. + type: bool + default: false + version_added: 3.2.0 + method6: + description: + - Configuration method to be used for IPv6. + - If O(ip6) is set, C(ipv6.method) is automatically set to V(manual) and this parameter is not needed. + - V(disabled) was added in community.general 3.3.0. + type: str + choices: [ignore, auto, dhcp, link-local, manual, shared, disabled] + version_added: 2.2.0 + ip_privacy6: + description: + - If enabled, it makes the kernel generate a temporary IPv6 address in addition to the public one. 
+ type: str + choices: [disabled, prefer-public-addr, prefer-temp-addr, unknown] + version_added: 4.2.0 + addr_gen_mode6: + description: + - Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration. + - V(default) and V(default-or-eui64) have been added in community.general 6.5.0. + type: str + choices: [default, default-or-eui64, eui64, stable-privacy] + version_added: 4.2.0 + mtu: + description: + - The connection MTU, for example V(9000). This can not be applied when creating the interface and is done once the interface has been created. + - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband). + - This parameter defaults to V(1500) when unset. + type: int + dhcp_client_id: + description: + - DHCP Client Identifier sent to the DHCP server. + type: str + primary: + description: + - This is only used with bond and is the primary interface name (for "active-backup" mode), this is the usually the 'ifname'. + type: str + miimon: + description: + - This is only used with bond - miimon. + - This parameter defaults to V(100) when unset. + type: int + downdelay: + description: + - This is only used with bond - downdelay. + type: int + updelay: + description: + - This is only used with bond - updelay. + type: int + xmit_hash_policy: + description: + - This is only used with bond - xmit_hash_policy type. + type: str + version_added: 5.6.0 + arp_interval: + description: + - This is only used with bond - ARP interval. + type: int + arp_ip_target: + description: + - This is only used with bond - ARP IP target. + type: str + stp: + description: + - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge. + type: bool + default: true + priority: + description: + - This is only used with 'bridge' - sets STP priority. 
+ type: int + default: 128 + forwarddelay: + description: + - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds. + type: int + default: 15 + hellotime: + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. + type: int + default: 2 + maxage: + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. + type: int + default: 20 + ageingtime: + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. + type: int + default: 300 + mac: + description: + - MAC address of the connection. + - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. + type: str + slavepriority: + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. + type: int + default: 32 + path_cost: + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations using this slave. + type: int + default: 100 + hairpin: + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame + was received on. + - The default change to V(false) in community.general 7.0.0. It used to be V(true) before. + type: bool + default: false + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [broadcast, roundrobin, activebackup, loadbalance, lacp] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices should be set during the team lifetime. 
+ type: str + choices: [same_all, by_active, only_active] + version_added: 3.4.0 + runner_fast_rate: + description: + - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets will be sent + once per second. Otherwise they will be sent every 30 seconds. + - Only allowed for O(runner=lacp). + type: bool + version_added: 6.5.0 + vlanid: + description: + - This is only used with VLAN - VLAN ID in range <0-4095>. + type: int + vlandev: + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname. + type: str + flags: + description: + - This is only used with VLAN - flags. + type: str + ingress: + description: + - This is only used with VLAN - VLAN ingress priority mapping. + type: str + egress: + description: + - This is only used with VLAN - VLAN egress priority mapping. + type: str + vxlan_id: + description: + - This is only used with VXLAN - VXLAN ID. + type: int + vxlan_remote: + description: + - This is only used with VXLAN - VXLAN destination IP address. + type: str + vxlan_local: + description: + - This is only used with VXLAN - VXLAN local IP address. + type: str + ip_tunnel_dev: + description: + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. + type: str + ip_tunnel_remote: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. + type: str + ip_tunnel_local: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. + type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + zone: + description: + - The trust level of the connection. 
+ - When updating this property on a currently activated connection, the change takes effect immediately. + type: str + version_added: 2.0.0 + wifi_sec: + description: + - The security configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: V({key-mgmt: wpa-psk, psk: my_password}).' + type: dict + suboptions: + auth-alg: description: - - The interface to bind the connection to. - - The connection will only be applicable to this interface name. - - A special value of V('*') can be used for interface-independent connections. - - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. - - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it. + - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication algorithm required + by the AP here. + - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username) and + O(wifi_sec.leap-password) properties must be specified. type: str - type: + choices: [open, shared, leap] + fils: description: - - This is the type of device or network connection that you wish to create or modify. - - Type V(dummy) is added in community.general 3.5.0. - - Type V(gsm) is added in community.general 3.7.0. - - Type V(infiniband) is added in community.general 2.0.0. - - Type V(loopback) is added in community.general 8.1.0. - - Type V(macvlan) is added in community.general 6.6.0. 
- - Type V(ovs-bridge) is added in community.general 8.6.0. - - Type V(ovs-interface) is added in community.general 8.6.0. - - Type V(ovs-port) is added in community.general 8.6.0. - - Type V(wireguard) is added in community.general 4.3.0. - - Type V(vpn) is added in community.general 5.1.0. - - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option. - - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) option. - type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan, - wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, loopback ] - mode: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access point support it) + or V(3) (enable FILS and fail if not supported). + - When set to V(0) and no global default is set, FILS will be optionally enabled. + type: int + choices: [0, 1, 2, 3] + default: 0 + group: description: - - This is the type of device or network connection that you wish to create for a bond or bridge. - type: str - choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] - default: balance-rr - transport_mode: - description: - - This option sets the connection type of Infiniband IPoIB devices. - type: str - choices: [ datagram, connected ] - version_added: 5.8.0 - slave_type: - description: - - Type of the device of this slave's master connection (for example V(bond)). - - Type V(ovs-port) is added in community.general 8.6.0. - type: str - choices: [ 'bond', 'bridge', 'team', 'ovs-port' ] - version_added: 7.0.0 - master: - description: - - Master